|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07581141909500118,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007581141909500118,
      "grad_norm": 1.77621328830719,
      "learning_rate": 2e-05,
      "loss": 1.4596,
      "step": 1
    },
    {
      "epoch": 0.0015162283819000236,
      "grad_norm": 1.7604256868362427,
      "learning_rate": 4e-05,
      "loss": 1.4375,
      "step": 2
    },
    {
      "epoch": 0.0022743425728500356,
      "grad_norm": 0.8544202446937561,
      "learning_rate": 6e-05,
      "loss": 1.3992,
      "step": 3
    },
    {
      "epoch": 0.0030324567638000473,
      "grad_norm": 0.7290700078010559,
      "learning_rate": 8e-05,
      "loss": 1.2861,
      "step": 4
    },
    {
      "epoch": 0.0037905709547500594,
      "grad_norm": 0.6216087341308594,
      "learning_rate": 0.0001,
      "loss": 1.1906,
      "step": 5
    },
    {
      "epoch": 0.004548685145700071,
      "grad_norm": 0.6320084929466248,
      "learning_rate": 0.00012,
      "loss": 1.0423,
      "step": 6
    },
    {
      "epoch": 0.005306799336650083,
      "grad_norm": 0.620789110660553,
      "learning_rate": 0.00014,
      "loss": 0.8656,
      "step": 7
    },
    {
      "epoch": 0.006064913527600095,
      "grad_norm": 0.509087085723877,
      "learning_rate": 0.00016,
      "loss": 0.6594,
      "step": 8
    },
    {
      "epoch": 0.006823027718550106,
      "grad_norm": 1.0617144107818604,
      "learning_rate": 0.00018,
      "loss": 0.5612,
      "step": 9
    },
    {
      "epoch": 0.007581141909500119,
      "grad_norm": 0.43339547514915466,
      "learning_rate": 0.0002,
      "loss": 0.3805,
      "step": 10
    },
    {
      "epoch": 0.00833925610045013,
      "grad_norm": 0.4575250744819641,
      "learning_rate": 0.0001999390827019096,
      "loss": 0.3066,
      "step": 11
    },
    {
      "epoch": 0.009097370291400142,
      "grad_norm": 0.3520224392414093,
      "learning_rate": 0.00019975640502598244,
      "loss": 0.2506,
      "step": 12
    },
    {
      "epoch": 0.009855484482350154,
      "grad_norm": 0.13390159606933594,
      "learning_rate": 0.00019945218953682734,
      "loss": 0.2106,
      "step": 13
    },
    {
      "epoch": 0.010613598673300166,
      "grad_norm": 0.13238006830215454,
      "learning_rate": 0.00019902680687415705,
      "loss": 0.2027,
      "step": 14
    },
    {
      "epoch": 0.011371712864250177,
      "grad_norm": 0.1745949536561966,
      "learning_rate": 0.00019848077530122083,
      "loss": 0.178,
      "step": 15
    },
    {
      "epoch": 0.01212982705520019,
      "grad_norm": 0.1583506315946579,
      "learning_rate": 0.00019781476007338058,
      "loss": 0.1865,
      "step": 16
    },
    {
      "epoch": 0.012887941246150201,
      "grad_norm": 0.10310098528862,
      "learning_rate": 0.00019702957262759965,
      "loss": 0.1722,
      "step": 17
    },
    {
      "epoch": 0.013646055437100213,
      "grad_norm": 0.08868539333343506,
      "learning_rate": 0.0001961261695938319,
      "loss": 0.1685,
      "step": 18
    },
    {
      "epoch": 0.014404169628050224,
      "grad_norm": 0.06325000524520874,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.1596,
      "step": 19
    },
    {
      "epoch": 0.015162283819000238,
      "grad_norm": 0.07059483975172043,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.1617,
      "step": 20
    },
    {
      "epoch": 0.015920398009950248,
      "grad_norm": 0.067365363240242,
      "learning_rate": 0.00019271838545667876,
      "loss": 0.1517,
      "step": 21
    },
    {
      "epoch": 0.01667851220090026,
      "grad_norm": 0.07025124877691269,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.1639,
      "step": 22
    },
    {
      "epoch": 0.01743662639185027,
      "grad_norm": 0.06402155756950378,
      "learning_rate": 0.0001898794046299167,
      "loss": 0.1505,
      "step": 23
    },
    {
      "epoch": 0.018194740582800285,
      "grad_norm": 0.074811652302742,
      "learning_rate": 0.00018829475928589271,
      "loss": 0.1481,
      "step": 24
    },
    {
      "epoch": 0.018952854773750295,
      "grad_norm": 0.07151901721954346,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.1595,
      "step": 25
    },
    {
      "epoch": 0.019710968964700308,
      "grad_norm": 0.06124258041381836,
      "learning_rate": 0.0001848048096156426,
      "loss": 0.146,
      "step": 26
    },
    {
      "epoch": 0.02046908315565032,
      "grad_norm": 0.06529385596513748,
      "learning_rate": 0.00018290375725550417,
      "loss": 0.1358,
      "step": 27
    },
    {
      "epoch": 0.02122719734660033,
      "grad_norm": 0.060589537024497986,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.1424,
      "step": 28
    },
    {
      "epoch": 0.021985311537550345,
      "grad_norm": 0.06462966650724411,
      "learning_rate": 0.00017880107536067218,
      "loss": 0.1393,
      "step": 29
    },
    {
      "epoch": 0.022743425728500355,
      "grad_norm": 0.06316369026899338,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.1221,
      "step": 30
    },
    {
      "epoch": 0.02350153991945037,
      "grad_norm": 0.06809717416763306,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.1295,
      "step": 31
    },
    {
      "epoch": 0.02425965411040038,
      "grad_norm": 0.055948369204998016,
      "learning_rate": 0.0001719339800338651,
      "loss": 0.1316,
      "step": 32
    },
    {
      "epoch": 0.025017768301350392,
      "grad_norm": 0.06291463971138,
      "learning_rate": 0.00016946583704589973,
      "loss": 0.113,
      "step": 33
    },
    {
      "epoch": 0.025775882492300402,
      "grad_norm": 0.06223299354314804,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.1208,
      "step": 34
    },
    {
      "epoch": 0.026533996683250415,
      "grad_norm": 0.06080786883831024,
      "learning_rate": 0.00016427876096865394,
      "loss": 0.1167,
      "step": 35
    },
    {
      "epoch": 0.027292110874200425,
      "grad_norm": 0.052656594663858414,
      "learning_rate": 0.0001615661475325658,
      "loss": 0.1205,
      "step": 36
    },
    {
      "epoch": 0.02805022506515044,
      "grad_norm": 0.05521203577518463,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.1292,
      "step": 37
    },
    {
      "epoch": 0.02880833925610045,
      "grad_norm": 0.0701892152428627,
      "learning_rate": 0.0001559192903470747,
      "loss": 0.1186,
      "step": 38
    },
    {
      "epoch": 0.029566453447050462,
      "grad_norm": 0.06418278068304062,
      "learning_rate": 0.0001529919264233205,
      "loss": 0.1193,
      "step": 39
    },
    {
      "epoch": 0.030324567638000476,
      "grad_norm": 0.0714295357465744,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.1225,
      "step": 40
    },
    {
      "epoch": 0.031082681828950486,
      "grad_norm": 0.05452275648713112,
      "learning_rate": 0.00014694715627858908,
      "loss": 0.1093,
      "step": 41
    },
    {
      "epoch": 0.031840796019900496,
      "grad_norm": 0.08698414266109467,
      "learning_rate": 0.00014383711467890774,
      "loss": 0.1107,
      "step": 42
    },
    {
      "epoch": 0.03259891021085051,
      "grad_norm": 0.07267863303422928,
      "learning_rate": 0.00014067366430758004,
      "loss": 0.1195,
      "step": 43
    },
    {
      "epoch": 0.03335702440180052,
      "grad_norm": 0.07162914425134659,
      "learning_rate": 0.00013746065934159123,
      "loss": 0.1146,
      "step": 44
    },
    {
      "epoch": 0.03411513859275053,
      "grad_norm": 0.05877028778195381,
      "learning_rate": 0.00013420201433256689,
      "loss": 0.1096,
      "step": 45
    },
    {
      "epoch": 0.03487325278370054,
      "grad_norm": 0.0560929961502552,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.1202,
      "step": 46
    },
    {
      "epoch": 0.03563136697465056,
      "grad_norm": 0.07029124349355698,
      "learning_rate": 0.0001275637355816999,
      "loss": 0.1029,
      "step": 47
    },
    {
      "epoch": 0.03638948116560057,
      "grad_norm": 0.043535735458135605,
      "learning_rate": 0.00012419218955996676,
      "loss": 0.1079,
      "step": 48
    },
    {
      "epoch": 0.03714759535655058,
      "grad_norm": 0.05880381911993027,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.1223,
      "step": 49
    },
    {
      "epoch": 0.03790570954750059,
      "grad_norm": 0.05926327407360077,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.116,
      "step": 50
    },
    {
      "epoch": 0.038663823738450606,
      "grad_norm": 0.0628269761800766,
      "learning_rate": 0.00011391731009600654,
      "loss": 0.1068,
      "step": 51
    },
    {
      "epoch": 0.039421937929400616,
      "grad_norm": 0.07782096415758133,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.1124,
      "step": 52
    },
    {
      "epoch": 0.040180052120350626,
      "grad_norm": 0.07580537348985672,
      "learning_rate": 0.00010697564737441252,
      "loss": 0.1038,
      "step": 53
    },
    {
      "epoch": 0.04093816631130064,
      "grad_norm": 0.05523601919412613,
      "learning_rate": 0.00010348994967025012,
      "loss": 0.1,
      "step": 54
    },
    {
      "epoch": 0.04169628050225065,
      "grad_norm": 0.056455422192811966,
      "learning_rate": 0.0001,
      "loss": 0.1046,
      "step": 55
    },
    {
      "epoch": 0.04245439469320066,
      "grad_norm": 0.052510254085063934,
      "learning_rate": 9.651005032974994e-05,
      "loss": 0.1025,
      "step": 56
    },
    {
      "epoch": 0.04321250888415067,
      "grad_norm": 0.053201328963041306,
      "learning_rate": 9.302435262558747e-05,
      "loss": 0.0956,
      "step": 57
    },
    {
      "epoch": 0.04397062307510069,
      "grad_norm": 0.05429815128445625,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.0915,
      "step": 58
    },
    {
      "epoch": 0.0447287372660507,
      "grad_norm": 0.05565072223544121,
      "learning_rate": 8.608268990399349e-05,
      "loss": 0.1037,
      "step": 59
    },
    {
      "epoch": 0.04548685145700071,
      "grad_norm": 0.06064201518893242,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.0996,
      "step": 60
    },
    {
      "epoch": 0.04624496564795072,
      "grad_norm": 0.053189653903245926,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.0972,
      "step": 61
    },
    {
      "epoch": 0.04700307983890074,
      "grad_norm": 0.0630035251379013,
      "learning_rate": 7.580781044003324e-05,
      "loss": 0.0957,
      "step": 62
    },
    {
      "epoch": 0.04776119402985075,
      "grad_norm": 0.05781060457229614,
      "learning_rate": 7.243626441830009e-05,
      "loss": 0.1116,
      "step": 63
    },
    {
      "epoch": 0.04851930822080076,
      "grad_norm": 0.05752211809158325,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.1015,
      "step": 64
    },
    {
      "epoch": 0.04927742241175077,
      "grad_norm": 0.05063620209693909,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.1083,
      "step": 65
    },
    {
      "epoch": 0.050035536602700784,
      "grad_norm": 0.0517071969807148,
      "learning_rate": 6.25393406584088e-05,
      "loss": 0.0946,
      "step": 66
    },
    {
      "epoch": 0.050793650793650794,
      "grad_norm": 0.04985162615776062,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.1058,
      "step": 67
    },
    {
      "epoch": 0.051551764984600804,
      "grad_norm": 0.04708476364612579,
      "learning_rate": 5.616288532109225e-05,
      "loss": 0.0921,
      "step": 68
    },
    {
      "epoch": 0.05230987917555082,
      "grad_norm": 0.05587763711810112,
      "learning_rate": 5.305284372141095e-05,
      "loss": 0.0927,
      "step": 69
    },
    {
      "epoch": 0.05306799336650083,
      "grad_norm": 0.05187711864709854,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.1022,
      "step": 70
    },
    {
      "epoch": 0.05382610755745084,
      "grad_norm": 0.05270811542868614,
      "learning_rate": 4.700807357667952e-05,
      "loss": 0.1022,
      "step": 71
    },
    {
      "epoch": 0.05458422174840085,
      "grad_norm": 0.050687965005636215,
      "learning_rate": 4.4080709652925336e-05,
      "loss": 0.0999,
      "step": 72
    },
    {
      "epoch": 0.05534233593935087,
      "grad_norm": 0.04412781447172165,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0989,
      "step": 73
    },
    {
      "epoch": 0.05610045013030088,
      "grad_norm": 0.04820292443037033,
      "learning_rate": 3.843385246743417e-05,
      "loss": 0.0882,
      "step": 74
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 0.05101883411407471,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.0896,
      "step": 75
    },
    {
      "epoch": 0.0576166785122009,
      "grad_norm": 0.056556086987257004,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.0951,
      "step": 76
    },
    {
      "epoch": 0.058374792703150914,
      "grad_norm": 0.05035807937383652,
      "learning_rate": 3.053416295410026e-05,
      "loss": 0.0911,
      "step": 77
    },
    {
      "epoch": 0.059132906894100924,
      "grad_norm": 0.043333761394023895,
      "learning_rate": 2.8066019966134904e-05,
      "loss": 0.089,
      "step": 78
    },
    {
      "epoch": 0.059891021085050934,
      "grad_norm": 0.047586455941200256,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.0874,
      "step": 79
    },
    {
      "epoch": 0.06064913527600095,
      "grad_norm": 0.0488692931830883,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.0925,
      "step": 80
    },
    {
      "epoch": 0.06140724946695096,
      "grad_norm": 0.05427223816514015,
      "learning_rate": 2.119892463932781e-05,
      "loss": 0.0927,
      "step": 81
    },
    {
      "epoch": 0.06216536365790097,
      "grad_norm": 0.057675670832395554,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0978,
      "step": 82
    },
    {
      "epoch": 0.06292347784885098,
      "grad_norm": 0.05663346126675606,
      "learning_rate": 1.7096242744495837e-05,
      "loss": 0.1041,
      "step": 83
    },
    {
      "epoch": 0.06368159203980099,
      "grad_norm": 0.054945193231105804,
      "learning_rate": 1.5195190384357404e-05,
      "loss": 0.0847,
      "step": 84
    },
    {
      "epoch": 0.064439706230751,
      "grad_norm": 0.05341991409659386,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0877,
      "step": 85
    },
    {
      "epoch": 0.06519782042170102,
      "grad_norm": 0.0533662885427475,
      "learning_rate": 1.1705240714107302e-05,
      "loss": 0.1052,
      "step": 86
    },
    {
      "epoch": 0.06595593461265103,
      "grad_norm": 0.045474544167518616,
      "learning_rate": 1.0120595370083318e-05,
      "loss": 0.0863,
      "step": 87
    },
    {
      "epoch": 0.06671404880360104,
      "grad_norm": 0.043375492095947266,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.0904,
      "step": 88
    },
    {
      "epoch": 0.06747216299455105,
      "grad_norm": 0.05601764842867851,
      "learning_rate": 7.281614543321269e-06,
      "loss": 0.094,
      "step": 89
    },
    {
      "epoch": 0.06823027718550106,
      "grad_norm": 0.0548299178481102,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.0857,
      "step": 90
    },
    {
      "epoch": 0.06898839137645107,
      "grad_norm": 0.04874192178249359,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0885,
      "step": 91
    },
    {
      "epoch": 0.06974650556740108,
      "grad_norm": 0.05287107825279236,
      "learning_rate": 3.873830406168111e-06,
      "loss": 0.1027,
      "step": 92
    },
    {
      "epoch": 0.0705046197583511,
      "grad_norm": 0.04792382940649986,
      "learning_rate": 2.970427372400353e-06,
      "loss": 0.0849,
      "step": 93
    },
    {
      "epoch": 0.07126273394930112,
      "grad_norm": 0.04913964122533798,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 0.0848,
      "step": 94
    },
    {
      "epoch": 0.07202084814025113,
      "grad_norm": 0.04982119798660278,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 0.0959,
      "step": 95
    },
    {
      "epoch": 0.07277896233120114,
      "grad_norm": 0.04819797724485397,
      "learning_rate": 9.731931258429638e-07,
      "loss": 0.0881,
      "step": 96
    },
    {
      "epoch": 0.07353707652215115,
      "grad_norm": 0.050941213965415955,
      "learning_rate": 5.478104631726711e-07,
      "loss": 0.0957,
      "step": 97
    },
    {
      "epoch": 0.07429519071310116,
      "grad_norm": 0.046391792595386505,
      "learning_rate": 2.4359497401758024e-07,
      "loss": 0.0859,
      "step": 98
    },
    {
      "epoch": 0.07505330490405117,
      "grad_norm": 0.0439014732837677,
      "learning_rate": 6.09172980904238e-08,
      "loss": 0.0963,
      "step": 99
    },
    {
      "epoch": 0.07581141909500118,
      "grad_norm": 0.04594559594988823,
      "learning_rate": 0.0,
      "loss": 0.091,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.162003273287598e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|