kevinoli committed
Commit c8fe846
Parent: dbec62b

Training in progress, step 4000, checkpoint

checkpoint-4000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:026d4481f383b2a87f0cb94045f8baa817fb0a55bc69f2bc32be28a8999436a4
+oid sha256:a878d08f72540c9f3479285cc88310f0cdf60138a32db2e5150d64c4a5eccf34
 size 1711848436
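
Note: the .safetensors/.pt entries in this commit are Git LFS pointer files, so only the `oid sha256:` line changes; the new checkpoint object replaces the old one in LFS storage while the byte size happens to stay identical. The oid is the SHA-256 of the actual file contents, so a downloaded checkpoint can be checked against the pointer. A minimal sketch (the local path is an assumption about where the file was downloaded):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path; compare against the `oid sha256:` value in the pointer above.
print(sha256_of("checkpoint-4000/model.safetensors"))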
checkpoint-4000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1b88defc3a89c4d8c55cd6ca0e38e0203ec283b45b98ce7709c5e93dd07789b
+oid sha256:a10dc97051a53b281e997547623dcdb90737be23513b9e3e7c89eaf1b21ab058
 size 3424043887
checkpoint-4000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f8610ffd95364d971241bbea36f29abb2299a046b81442005be36cebcbe4f45b
3
  size 623
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0cc521a4ce9508ca76ecef33ab44295ea7b55ac389a0be2ed51baa6f934287c
3
  size 623
checkpoint-4000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.7473344802856445,
+  "best_metric": 0.813768208026886,
   "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l37-l/checkpoint-4000",
-  "epoch": 0.7369196757553427,
+  "epoch": 0.4502983226387482,
   "eval_steps": 500,
   "global_step": 4000,
   "is_hyper_param_search": false,
@@ -9,128 +9,128 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.09211495946941783,
-      "grad_norm": 8.437943458557129,
-      "learning_rate": 2.907885040530582e-07,
-      "loss": 0.3017,
+      "epoch": 0.056287290329843524,
+      "grad_norm": 96.92656707763672,
+      "learning_rate": 2.9437127096701565e-07,
+      "loss": 0.3748,
       "step": 500
     },
     {
-      "epoch": 0.09211495946941783,
-      "eval_loss": 1.164457082748413,
-      "eval_runtime": 75.2697,
-      "eval_samples_per_second": 16.036,
-      "eval_steps_per_second": 2.006,
+      "epoch": 0.056287290329843524,
+      "eval_loss": 1.2226382493972778,
+      "eval_runtime": 131.2779,
+      "eval_samples_per_second": 15.037,
+      "eval_steps_per_second": 1.882,
       "step": 500
     },
     {
-      "epoch": 0.18422991893883567,
-      "grad_norm": 14.740598678588867,
-      "learning_rate": 2.815770081061164e-07,
-      "loss": 0.2899,
+      "epoch": 0.11257458065968705,
+      "grad_norm": 402.31158447265625,
+      "learning_rate": 2.887425419340313e-07,
+      "loss": 0.3057,
       "step": 1000
     },
     {
-      "epoch": 0.18422991893883567,
-      "eval_loss": 1.01627779006958,
-      "eval_runtime": 76.4934,
-      "eval_samples_per_second": 15.779,
-      "eval_steps_per_second": 1.974,
+      "epoch": 0.11257458065968705,
+      "eval_loss": 1.0702259540557861,
+      "eval_runtime": 131.0109,
+      "eval_samples_per_second": 15.067,
+      "eval_steps_per_second": 1.885,
       "step": 1000
     },
     {
-      "epoch": 0.2763448784082535,
-      "grad_norm": 0.17031528055667877,
-      "learning_rate": 2.723655121591746e-07,
-      "loss": 0.273,
+      "epoch": 0.16886187098953057,
+      "grad_norm": 515.0911865234375,
+      "learning_rate": 2.831138129010469e-07,
+      "loss": 0.2239,
       "step": 1500
     },
     {
-      "epoch": 0.2763448784082535,
-      "eval_loss": 0.9395213723182678,
-      "eval_runtime": 76.6256,
-      "eval_samples_per_second": 15.752,
-      "eval_steps_per_second": 1.971,
+      "epoch": 0.16886187098953057,
+      "eval_loss": 0.9957238435745239,
+      "eval_runtime": 134.8377,
+      "eval_samples_per_second": 14.64,
+      "eval_steps_per_second": 1.832,
       "step": 1500
     },
     {
-      "epoch": 0.36845983787767134,
-      "grad_norm": 0.0030106802005320787,
-      "learning_rate": 2.6315401621223287e-07,
-      "loss": 0.1818,
+      "epoch": 0.2251491613193741,
+      "grad_norm": 3.1967105865478516,
+      "learning_rate": 2.774850838680626e-07,
+      "loss": 0.2229,
       "step": 2000
     },
     {
-      "epoch": 0.36845983787767134,
-      "eval_loss": 0.8942967057228088,
-      "eval_runtime": 76.8089,
-      "eval_samples_per_second": 15.714,
-      "eval_steps_per_second": 1.966,
+      "epoch": 0.2251491613193741,
+      "eval_loss": 0.9504629373550415,
+      "eval_runtime": 129.0169,
+      "eval_samples_per_second": 15.3,
+      "eval_steps_per_second": 1.914,
       "step": 2000
     },
     {
-      "epoch": 0.46057479734708917,
-      "grad_norm": 474.4410705566406,
-      "learning_rate": 2.539425202652911e-07,
-      "loss": 0.2094,
+      "epoch": 0.2814364516492176,
+      "grad_norm": 276.22998046875,
+      "learning_rate": 2.718563548350782e-07,
+      "loss": 0.2098,
       "step": 2500
     },
     {
-      "epoch": 0.46057479734708917,
-      "eval_loss": 0.8330363631248474,
-      "eval_runtime": 76.9939,
-      "eval_samples_per_second": 15.677,
-      "eval_steps_per_second": 1.961,
+      "epoch": 0.2814364516492176,
+      "eval_loss": 0.9006705284118652,
+      "eval_runtime": 129.1816,
+      "eval_samples_per_second": 15.281,
+      "eval_steps_per_second": 1.912,
       "step": 2500
     },
     {
-      "epoch": 0.552689756816507,
-      "grad_norm": 314.3533935546875,
-      "learning_rate": 2.447310243183493e-07,
-      "loss": 0.1949,
+      "epoch": 0.33772374197906113,
+      "grad_norm": 1105.317138671875,
+      "learning_rate": 2.6622762580209386e-07,
+      "loss": 0.1938,
       "step": 3000
     },
     {
-      "epoch": 0.552689756816507,
-      "eval_loss": 0.7885717749595642,
-      "eval_runtime": 76.7534,
-      "eval_samples_per_second": 15.726,
-      "eval_steps_per_second": 1.967,
+      "epoch": 0.33772374197906113,
+      "eval_loss": 0.8782906532287598,
+      "eval_runtime": 128.6387,
+      "eval_samples_per_second": 15.345,
+      "eval_steps_per_second": 1.92,
       "step": 3000
     },
     {
-      "epoch": 0.6448047162859248,
-      "grad_norm": 7.678028106689453,
-      "learning_rate": 2.3551952837140753e-07,
-      "loss": 0.1335,
+      "epoch": 0.39401103230890466,
+      "grad_norm": 0.00043465400813147426,
+      "learning_rate": 2.605988967691095e-07,
+      "loss": 0.1688,
       "step": 3500
     },
     {
-      "epoch": 0.6448047162859248,
-      "eval_loss": 0.7657251358032227,
-      "eval_runtime": 76.791,
-      "eval_samples_per_second": 15.718,
-      "eval_steps_per_second": 1.966,
+      "epoch": 0.39401103230890466,
+      "eval_loss": 0.8405746221542358,
+      "eval_runtime": 128.6989,
+      "eval_samples_per_second": 15.338,
+      "eval_steps_per_second": 1.919,
       "step": 3500
     },
     {
-      "epoch": 0.7369196757553427,
-      "grad_norm": 30.121461868286133,
-      "learning_rate": 2.263080324244657e-07,
-      "loss": 0.1541,
+      "epoch": 0.4502983226387482,
+      "grad_norm": 0.027426382526755333,
+      "learning_rate": 2.549701677361252e-07,
+      "loss": 0.1457,
       "step": 4000
     },
     {
-      "epoch": 0.7369196757553427,
-      "eval_loss": 0.7473344802856445,
-      "eval_runtime": 76.929,
-      "eval_samples_per_second": 15.69,
-      "eval_steps_per_second": 1.963,
+      "epoch": 0.4502983226387482,
+      "eval_loss": 0.813768208026886,
+      "eval_runtime": 128.7209,
+      "eval_samples_per_second": 15.336,
+      "eval_steps_per_second": 1.919,
       "step": 4000
     }
   ],
   "logging_steps": 500,
-  "max_steps": 16284,
+  "max_steps": 26649,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 500,
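
Note: trainer_state.json is plain JSON, so the logged curve can be inspected directly. The new state is internally consistent: max_steps / num_train_epochs = 26649 / 3 = 8883 steps per epoch, and 4000 / 8883 ≈ 0.4503 matches the recorded epoch at step 4000. A minimal sketch, assuming the checkpoint directory has been downloaded locally:

import json

# Hypothetical local path to the checkpoint from this commit.
with open("checkpoint-4000/trainer_state.json") as f:
    state = json.load(f)

steps_per_epoch = state["max_steps"] / state["num_train_epochs"]  # 26649 / 3 = 8883

# Print the evaluation curve recorded in log_history.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:.4f}  eval_loss {entry['eval_loss']:.4f}")

# Sanity check: the logged epoch should equal global_step / steps_per_epoch,
# e.g. 4000 / 8883 ≈ 0.4503 for the final entry above.
print(state["global_step"] / steps_per_epoch)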
checkpoint-4000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76f7dfc201ccc0039551250d06ef1282a727ca0a51016f874c2eff3c580dbf8e
+oid sha256:074eb2b3aae1f3310fb0b27d83d259baa7ad74a32c8433c81af819e28558f6b0
 size 4847
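
Note: only the LFS oid changes here as well. training_args.bin is typically a TrainingArguments object saved with torch.save, so it can be inspected after download; a sketch, assuming transformers and a compatible PyTorch are installed, with a hypothetical local path:

import torch

# training_args.bin is a pickled transformers.TrainingArguments saved via torch.save.
# weights_only=False is required on recent PyTorch to unpickle arbitrary objects;
# only do this for files from a source you trust.
args = torch.load("checkpoint-4000/training_args.bin", weights_only=False)

print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)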