Commit 7e2a5c7 (1 parent: 7f8ef2d), committed by aiyets

Training in progress, step 10, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
     "k_proj",
-    "q_proj",
-    "up_proj",
     "gate_proj",
+    "o_proj",
     "down_proj",
-    "o_proj"
+    "v_proj",
+    "up_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d1651f3601378af486d8705149df9a51b2def80ca66d31c03b802020e5a8db9
+oid sha256:21cb1efe47a7b9ec7582f9c4cad57e9fdc96cddc43b5fe446da11199b86121df
 size 83115256
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b50128d3b0c6a2edc34f6c7aabbd7e35a705b84c33a17a46e29421d95323100
+oid sha256:ab11797a5a48c8b5091a20e4ab8445d69bbcf0b52c3045370731dea57405fdcc
 size 166351098
last-checkpoint/trainer_state.json CHANGED
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 0.0005925925925925926,
-      "grad_norm": 10.913461685180664,
+      "grad_norm": 10.896183013916016,
       "learning_rate": 5e-07,
       "logits/chosen": -4.444676399230957,
       "logits/rejected": -4.0909342765808105,
@@ -25,7 +25,7 @@
     },
     {
       "epoch": 0.0011851851851851852,
-      "grad_norm": 7.564138412475586,
+      "grad_norm": 7.562353610992432,
       "learning_rate": 4.849231551964771e-07,
       "logits/chosen": -4.530362606048584,
       "logits/rejected": -3.981240749359131,
@@ -40,122 +40,122 @@
     },
     {
       "epoch": 0.0017777777777777779,
-      "grad_norm": 14.393203735351562,
+      "grad_norm": 13.934762001037598,
       "learning_rate": 4.415111107797445e-07,
-      "logits/chosen": -4.539660930633545,
-      "logits/rejected": -4.32346248626709,
-      "logps/chosen": -200.69912719726562,
-      "logps/rejected": -224.77163696289062,
-      "loss": 0.6526,
+      "logits/chosen": -4.541503429412842,
+      "logits/rejected": -4.324827194213867,
+      "logps/chosen": -200.9736328125,
+      "logps/rejected": -224.6981658935547,
+      "loss": 0.6691,
       "rewards/accuracies": 1.0,
-      "rewards/chosen": 0.04995880275964737,
-      "rewards/margins": 0.08375511318445206,
-      "rewards/rejected": -0.03379631042480469,
+      "rewards/chosen": 0.022507095709443092,
+      "rewards/margins": 0.04895630106329918,
+      "rewards/rejected": -0.026449203491210938,
       "step": 3
     },
     {
       "epoch": 0.0023703703703703703,
-      "grad_norm": 12.62684440612793,
+      "grad_norm": 13.249699592590332,
       "learning_rate": 3.75e-07,
-      "logits/chosen": -3.9760360717773438,
-      "logits/rejected": -4.4724321365356445,
-      "logps/chosen": -239.9991455078125,
-      "logps/rejected": -202.63072204589844,
-      "loss": 0.7198,
+      "logits/chosen": -3.9642434120178223,
+      "logits/rejected": -4.461881160736084,
+      "logps/chosen": -239.5960693359375,
+      "logps/rejected": -202.10887145996094,
+      "loss": 0.7259,
       "rewards/accuracies": 0.0,
-      "rewards/chosen": -0.041876986622810364,
-      "rewards/margins": -0.052445217967033386,
-      "rewards/rejected": 0.01056823693215847,
+      "rewards/chosen": -0.001570509746670723,
+      "rewards/margins": -0.06432266533374786,
+      "rewards/rejected": 0.06275215744972229,
       "step": 4
     },
     {
       "epoch": 0.002962962962962963,
-      "grad_norm": 6.745312213897705,
+      "grad_norm": 6.773025989532471,
       "learning_rate": 2.934120444167326e-07,
-      "logits/chosen": -4.136646270751953,
-      "logits/rejected": -4.701557159423828,
-      "logps/chosen": -169.00192260742188,
-      "logps/rejected": -149.2642059326172,
-      "loss": 0.6818,
-      "rewards/accuracies": 0.5,
-      "rewards/chosen": 0.02975158765912056,
-      "rewards/margins": 0.023634720593690872,
-      "rewards/rejected": 0.006116867531090975,
+      "logits/chosen": -4.136728286743164,
+      "logits/rejected": -4.700671672821045,
+      "logps/chosen": -169.1282958984375,
+      "logps/rejected": -149.28570556640625,
+      "loss": 0.6866,
+      "rewards/accuracies": 0.75,
+      "rewards/chosen": 0.017114639282226562,
+      "rewards/margins": 0.013147544115781784,
+      "rewards/rejected": 0.003967094700783491,
       "step": 5
     },
     {
       "epoch": 0.0035555555555555557,
-      "grad_norm": 13.741002082824707,
+      "grad_norm": 13.418476104736328,
       "learning_rate": 2.065879555832674e-07,
-      "logits/chosen": -5.057063579559326,
-      "logits/rejected": -5.147495746612549,
-      "logps/chosen": -193.6820526123047,
-      "logps/rejected": -202.20425415039062,
-      "loss": 0.6913,
+      "logits/chosen": -5.0562872886657715,
+      "logits/rejected": -5.145793914794922,
+      "logps/chosen": -194.10568237304688,
+      "logps/rejected": -202.68751525878906,
+      "loss": 0.6884,
       "rewards/accuracies": 0.75,
-      "rewards/chosen": 0.0083160400390625,
-      "rewards/margins": 0.004281995818018913,
-      "rewards/rejected": 0.0040340423583984375,
+      "rewards/chosen": -0.034047700464725494,
+      "rewards/margins": 0.01024474948644638,
+      "rewards/rejected": -0.044292449951171875,
      "step": 6
     },
     {
       "epoch": 0.004148148148148148,
-      "grad_norm": 6.920108795166016,
+      "grad_norm": 6.973136901855469,
       "learning_rate": 1.2500000000000005e-07,
-      "logits/chosen": -4.2448506355285645,
-      "logits/rejected": -3.682173490524292,
-      "logps/chosen": -176.11734008789062,
-      "logps/rejected": -222.982666015625,
-      "loss": 0.7008,
-      "rewards/accuracies": 0.0,
-      "rewards/chosen": 0.028499603271484375,
-      "rewards/margins": -0.015093998052179813,
-      "rewards/rejected": 0.043593600392341614,
+      "logits/chosen": -4.244935035705566,
+      "logits/rejected": -3.681328058242798,
+      "logps/chosen": -176.23780822753906,
+      "logps/rejected": -222.976318359375,
+      "loss": 0.7073,
+      "rewards/accuracies": 0.5,
+      "rewards/chosen": 0.016452789306640625,
+      "rewards/margins": -0.027773665264248848,
+      "rewards/rejected": 0.044226452708244324,
       "step": 7
     },
     {
       "epoch": 0.004740740740740741,
-      "grad_norm": 10.856575965881348,
+      "grad_norm": 10.38602066040039,
       "learning_rate": 5.848888922025552e-08,
-      "logits/chosen": -4.413946151733398,
-      "logits/rejected": -4.419940948486328,
-      "logps/chosen": -181.99610900878906,
-      "logps/rejected": -196.20811462402344,
-      "loss": 0.6967,
-      "rewards/accuracies": 0.25,
-      "rewards/chosen": -0.02880115620791912,
-      "rewards/margins": -0.007002831436693668,
-      "rewards/rejected": -0.021798323839902878,
+      "logits/chosen": -4.409693241119385,
+      "logits/rejected": -4.41237735748291,
+      "logps/chosen": -181.64944458007812,
+      "logps/rejected": -196.00564575195312,
+      "loss": 0.6896,
+      "rewards/accuracies": 0.5,
+      "rewards/chosen": 0.005864143371582031,
+      "rewards/margins": 0.007416536100208759,
+      "rewards/rejected": -0.001552392728626728,
       "step": 8
     },
     {
       "epoch": 0.005333333333333333,
-      "grad_norm": 9.559263229370117,
+      "grad_norm": 9.169404983520508,
       "learning_rate": 1.507684480352292e-08,
-      "logits/chosen": -4.909343242645264,
-      "logits/rejected": -4.601991176605225,
-      "logps/chosen": -154.6862030029297,
-      "logps/rejected": -179.27401733398438,
-      "loss": 0.6929,
-      "rewards/accuracies": 0.25,
-      "rewards/chosen": 0.0032157916575670242,
-      "rewards/margins": 0.0011837054044008255,
-      "rewards/rejected": 0.0020320871844887733,
+      "logits/chosen": -4.903069972991943,
+      "logits/rejected": -4.5977277755737305,
+      "logps/chosen": -154.88751220703125,
+      "logps/rejected": -179.42715454101562,
+      "loss": 0.695,
+      "rewards/accuracies": 0.5,
+      "rewards/chosen": -0.016914749518036842,
+      "rewards/margins": -0.003633500775322318,
+      "rewards/rejected": -0.013281249441206455,
       "step": 9
     },
     {
       "epoch": 0.005925925925925926,
-      "grad_norm": 10.816996574401855,
+      "grad_norm": 10.337571144104004,
       "learning_rate": 0.0,
-      "logits/chosen": -3.8037424087524414,
-      "logits/rejected": -3.683922529220581,
-      "logps/chosen": -232.978515625,
-      "logps/rejected": -254.24078369140625,
-      "loss": 0.7146,
+      "logits/chosen": -3.817504644393921,
+      "logits/rejected": -3.697187900543213,
+      "logps/chosen": -233.053466796875,
+      "logps/rejected": -254.43130493164062,
+      "loss": 0.7085,
       "rewards/accuracies": 0.5,
-      "rewards/chosen": -0.05405044183135033,
-      "rewards/margins": -0.040559008717536926,
-      "rewards/rejected": -0.013491439633071423,
+      "rewards/chosen": -0.06154556944966316,
+      "rewards/margins": -0.029002761468291283,
+      "rewards/rejected": -0.03254280239343643,
       "step": 10
     }
   ],
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:500a27bb31e08eab12d9fb299fbdfd1af99d44a30059f736748d9f929e4cbfa9
+oid sha256:265f72a5d7c9bd4d95478bba01ec7511ef89dd45479699ac483e45fdedd76cef
 size 5944