{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0379066075955365,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000379066075955365,
"grad_norm": 2.788883924484253,
"learning_rate": 2e-05,
"loss": 1.4892,
"step": 1
},
{
"epoch": 0.00075813215191073,
"grad_norm": 2.803041934967041,
"learning_rate": 4e-05,
"loss": 1.4967,
"step": 2
},
{
"epoch": 0.001137198227866095,
"grad_norm": 2.5324907302856445,
"learning_rate": 6e-05,
"loss": 1.3553,
"step": 3
},
{
"epoch": 0.00151626430382146,
"grad_norm": 1.693648099899292,
"learning_rate": 8e-05,
"loss": 0.983,
"step": 4
},
{
"epoch": 0.0018953303797768248,
"grad_norm": 1.2373262643814087,
"learning_rate": 0.0001,
"loss": 0.6743,
"step": 5
},
{
"epoch": 0.00227439645573219,
"grad_norm": 0.9896576404571533,
"learning_rate": 0.00012,
"loss": 0.4505,
"step": 6
},
{
"epoch": 0.0026534625316875548,
"grad_norm": 1.0764743089675903,
"learning_rate": 0.00014,
"loss": 0.3412,
"step": 7
},
{
"epoch": 0.00303252860764292,
"grad_norm": 0.4975992739200592,
"learning_rate": 0.00016,
"loss": 0.2131,
"step": 8
},
{
"epoch": 0.0034115946835982847,
"grad_norm": 0.3490230143070221,
"learning_rate": 0.00018,
"loss": 0.1971,
"step": 9
},
{
"epoch": 0.0037906607595536495,
"grad_norm": 0.2667368948459625,
"learning_rate": 0.0002,
"loss": 0.1833,
"step": 10
},
{
"epoch": 0.004169726835509015,
"grad_norm": 0.25213170051574707,
"learning_rate": 0.0001999390827019096,
"loss": 0.1652,
"step": 11
},
{
"epoch": 0.00454879291146438,
"grad_norm": 0.18969492614269257,
"learning_rate": 0.00019975640502598244,
"loss": 0.1606,
"step": 12
},
{
"epoch": 0.004927858987419744,
"grad_norm": 0.15731994807720184,
"learning_rate": 0.00019945218953682734,
"loss": 0.152,
"step": 13
},
{
"epoch": 0.0053069250633751095,
"grad_norm": 0.14271649718284607,
"learning_rate": 0.00019902680687415705,
"loss": 0.1435,
"step": 14
},
{
"epoch": 0.005685991139330475,
"grad_norm": 0.16122481226921082,
"learning_rate": 0.00019848077530122083,
"loss": 0.1609,
"step": 15
},
{
"epoch": 0.00606505721528584,
"grad_norm": 0.15835386514663696,
"learning_rate": 0.00019781476007338058,
"loss": 0.1444,
"step": 16
},
{
"epoch": 0.006444123291241204,
"grad_norm": 0.11050277203321457,
"learning_rate": 0.00019702957262759965,
"loss": 0.128,
"step": 17
},
{
"epoch": 0.0068231893671965695,
"grad_norm": 0.12347878515720367,
"learning_rate": 0.0001961261695938319,
"loss": 0.1326,
"step": 18
},
{
"epoch": 0.007202255443151935,
"grad_norm": 0.09975384920835495,
"learning_rate": 0.00019510565162951537,
"loss": 0.1187,
"step": 19
},
{
"epoch": 0.007581321519107299,
"grad_norm": 0.1068282350897789,
"learning_rate": 0.00019396926207859084,
"loss": 0.1202,
"step": 20
},
{
"epoch": 0.007960387595062665,
"grad_norm": 0.1021302118897438,
"learning_rate": 0.00019271838545667876,
"loss": 0.1297,
"step": 21
},
{
"epoch": 0.00833945367101803,
"grad_norm": 0.09192782640457153,
"learning_rate": 0.0001913545457642601,
"loss": 0.1227,
"step": 22
},
{
"epoch": 0.008718519746973394,
"grad_norm": 0.07424570620059967,
"learning_rate": 0.0001898794046299167,
"loss": 0.1104,
"step": 23
},
{
"epoch": 0.00909758582292876,
"grad_norm": 0.09622485190629959,
"learning_rate": 0.00018829475928589271,
"loss": 0.1279,
"step": 24
},
{
"epoch": 0.009476651898884124,
"grad_norm": 0.08250657469034195,
"learning_rate": 0.00018660254037844388,
"loss": 0.1152,
"step": 25
},
{
"epoch": 0.009855717974839489,
"grad_norm": 0.09128892421722412,
"learning_rate": 0.0001848048096156426,
"loss": 0.1072,
"step": 26
},
{
"epoch": 0.010234784050794855,
"grad_norm": 0.11655224114656448,
"learning_rate": 0.00018290375725550417,
"loss": 0.1156,
"step": 27
},
{
"epoch": 0.010613850126750219,
"grad_norm": 0.09212372452020645,
"learning_rate": 0.00018090169943749476,
"loss": 0.107,
"step": 28
},
{
"epoch": 0.010992916202705583,
"grad_norm": 0.1035194918513298,
"learning_rate": 0.00017880107536067218,
"loss": 0.1169,
"step": 29
},
{
"epoch": 0.01137198227866095,
"grad_norm": 0.09886885434389114,
"learning_rate": 0.0001766044443118978,
"loss": 0.114,
"step": 30
},
{
"epoch": 0.011751048354616314,
"grad_norm": 0.07283884286880493,
"learning_rate": 0.00017431448254773944,
"loss": 0.0962,
"step": 31
},
{
"epoch": 0.01213011443057168,
"grad_norm": 0.09377507120370865,
"learning_rate": 0.0001719339800338651,
"loss": 0.0925,
"step": 32
},
{
"epoch": 0.012509180506527044,
"grad_norm": 0.09097617864608765,
"learning_rate": 0.00016946583704589973,
"loss": 0.1045,
"step": 33
},
{
"epoch": 0.012888246582482409,
"grad_norm": 0.10535702854394913,
"learning_rate": 0.00016691306063588583,
"loss": 0.1005,
"step": 34
},
{
"epoch": 0.013267312658437775,
"grad_norm": 0.09806367754936218,
"learning_rate": 0.00016427876096865394,
"loss": 0.102,
"step": 35
},
{
"epoch": 0.013646378734393139,
"grad_norm": 0.09789954870939255,
"learning_rate": 0.0001615661475325658,
"loss": 0.0955,
"step": 36
},
{
"epoch": 0.014025444810348503,
"grad_norm": 0.1131892129778862,
"learning_rate": 0.00015877852522924732,
"loss": 0.0853,
"step": 37
},
{
"epoch": 0.01440451088630387,
"grad_norm": 0.08628207445144653,
"learning_rate": 0.0001559192903470747,
"loss": 0.0901,
"step": 38
},
{
"epoch": 0.014783576962259234,
"grad_norm": 0.11414898931980133,
"learning_rate": 0.0001529919264233205,
"loss": 0.0993,
"step": 39
},
{
"epoch": 0.015162643038214598,
"grad_norm": 0.07424012571573257,
"learning_rate": 0.00015000000000000001,
"loss": 0.1037,
"step": 40
},
{
"epoch": 0.015541709114169964,
"grad_norm": 0.08380600810050964,
"learning_rate": 0.00014694715627858908,
"loss": 0.0839,
"step": 41
},
{
"epoch": 0.01592077519012533,
"grad_norm": 0.06833647936582565,
"learning_rate": 0.00014383711467890774,
"loss": 0.0916,
"step": 42
},
{
"epoch": 0.016299841266080695,
"grad_norm": 0.06410624086856842,
"learning_rate": 0.00014067366430758004,
"loss": 0.0845,
"step": 43
},
{
"epoch": 0.01667890734203606,
"grad_norm": 0.06120501831173897,
"learning_rate": 0.00013746065934159123,
"loss": 0.0812,
"step": 44
},
{
"epoch": 0.017057973417991423,
"grad_norm": 0.06658154726028442,
"learning_rate": 0.00013420201433256689,
"loss": 0.0868,
"step": 45
},
{
"epoch": 0.017437039493946788,
"grad_norm": 0.07054657489061356,
"learning_rate": 0.00013090169943749476,
"loss": 0.0885,
"step": 46
},
{
"epoch": 0.017816105569902152,
"grad_norm": 0.05874604731798172,
"learning_rate": 0.0001275637355816999,
"loss": 0.0845,
"step": 47
},
{
"epoch": 0.01819517164585752,
"grad_norm": 0.07008881121873856,
"learning_rate": 0.00012419218955996676,
"loss": 0.0775,
"step": 48
},
{
"epoch": 0.018574237721812884,
"grad_norm": 0.07047414779663086,
"learning_rate": 0.00012079116908177593,
"loss": 0.0839,
"step": 49
},
{
"epoch": 0.01895330379776825,
"grad_norm": 0.07158780097961426,
"learning_rate": 0.00011736481776669306,
"loss": 0.0993,
"step": 50
},
{
"epoch": 0.019332369873723613,
"grad_norm": 0.06991136819124222,
"learning_rate": 0.00011391731009600654,
"loss": 0.0873,
"step": 51
},
{
"epoch": 0.019711435949678977,
"grad_norm": 0.06695719808340073,
"learning_rate": 0.00011045284632676536,
"loss": 0.0876,
"step": 52
},
{
"epoch": 0.020090502025634345,
"grad_norm": 0.07722793519496918,
"learning_rate": 0.00010697564737441252,
"loss": 0.0977,
"step": 53
},
{
"epoch": 0.02046956810158971,
"grad_norm": 0.06521150469779968,
"learning_rate": 0.00010348994967025012,
"loss": 0.0856,
"step": 54
},
{
"epoch": 0.020848634177545074,
"grad_norm": 0.07508374005556107,
"learning_rate": 0.0001,
"loss": 0.0876,
"step": 55
},
{
"epoch": 0.021227700253500438,
"grad_norm": 0.05650763958692551,
"learning_rate": 9.651005032974994e-05,
"loss": 0.0837,
"step": 56
},
{
"epoch": 0.021606766329455802,
"grad_norm": 0.07390966266393661,
"learning_rate": 9.302435262558747e-05,
"loss": 0.0879,
"step": 57
},
{
"epoch": 0.021985832405411167,
"grad_norm": 0.058437567204236984,
"learning_rate": 8.954715367323468e-05,
"loss": 0.0773,
"step": 58
},
{
"epoch": 0.022364898481366535,
"grad_norm": 0.06248459964990616,
"learning_rate": 8.608268990399349e-05,
"loss": 0.0788,
"step": 59
},
{
"epoch": 0.0227439645573219,
"grad_norm": 0.0653211697936058,
"learning_rate": 8.263518223330697e-05,
"loss": 0.0855,
"step": 60
},
{
"epoch": 0.023123030633277263,
"grad_norm": 0.062243081629276276,
"learning_rate": 7.920883091822408e-05,
"loss": 0.0886,
"step": 61
},
{
"epoch": 0.023502096709232628,
"grad_norm": 0.055676043033599854,
"learning_rate": 7.580781044003324e-05,
"loss": 0.0897,
"step": 62
},
{
"epoch": 0.023881162785187992,
"grad_norm": 0.05823361128568649,
"learning_rate": 7.243626441830009e-05,
"loss": 0.0753,
"step": 63
},
{
"epoch": 0.02426022886114336,
"grad_norm": 0.047967329621315,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0804,
"step": 64
},
{
"epoch": 0.024639294937098724,
"grad_norm": 0.04651477187871933,
"learning_rate": 6.579798566743314e-05,
"loss": 0.0737,
"step": 65
},
{
"epoch": 0.02501836101305409,
"grad_norm": 0.06440019607543945,
"learning_rate": 6.25393406584088e-05,
"loss": 0.0931,
"step": 66
},
{
"epoch": 0.025397427089009453,
"grad_norm": 0.0673041045665741,
"learning_rate": 5.9326335692419995e-05,
"loss": 0.0926,
"step": 67
},
{
"epoch": 0.025776493164964817,
"grad_norm": 0.06460334360599518,
"learning_rate": 5.616288532109225e-05,
"loss": 0.1055,
"step": 68
},
{
"epoch": 0.02615555924092018,
"grad_norm": 0.06001191958785057,
"learning_rate": 5.305284372141095e-05,
"loss": 0.0815,
"step": 69
},
{
"epoch": 0.02653462531687555,
"grad_norm": 0.05296599864959717,
"learning_rate": 5.000000000000002e-05,
"loss": 0.0837,
"step": 70
},
{
"epoch": 0.026913691392830914,
"grad_norm": 0.0675460696220398,
"learning_rate": 4.700807357667952e-05,
"loss": 0.0848,
"step": 71
},
{
"epoch": 0.027292757468786278,
"grad_norm": 0.05409557744860649,
"learning_rate": 4.4080709652925336e-05,
"loss": 0.0779,
"step": 72
},
{
"epoch": 0.027671823544741642,
"grad_norm": 0.05015251785516739,
"learning_rate": 4.12214747707527e-05,
"loss": 0.0788,
"step": 73
},
{
"epoch": 0.028050889620697007,
"grad_norm": 0.05301973596215248,
"learning_rate": 3.843385246743417e-05,
"loss": 0.0764,
"step": 74
},
{
"epoch": 0.02842995569665237,
"grad_norm": 0.05640785023570061,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.0867,
"step": 75
},
{
"epoch": 0.02880902177260774,
"grad_norm": 0.055719390511512756,
"learning_rate": 3.308693936411421e-05,
"loss": 0.0788,
"step": 76
},
{
"epoch": 0.029188087848563103,
"grad_norm": 0.06163398548960686,
"learning_rate": 3.053416295410026e-05,
"loss": 0.0807,
"step": 77
},
{
"epoch": 0.029567153924518468,
"grad_norm": 0.06289924681186676,
"learning_rate": 2.8066019966134904e-05,
"loss": 0.0808,
"step": 78
},
{
"epoch": 0.029946220000473832,
"grad_norm": 0.056900832802057266,
"learning_rate": 2.5685517452260567e-05,
"loss": 0.0762,
"step": 79
},
{
"epoch": 0.030325286076429196,
"grad_norm": 0.06545019149780273,
"learning_rate": 2.339555568810221e-05,
"loss": 0.0929,
"step": 80
},
{
"epoch": 0.030704352152384564,
"grad_norm": 0.060016706585884094,
"learning_rate": 2.119892463932781e-05,
"loss": 0.0752,
"step": 81
},
{
"epoch": 0.03108341822833993,
"grad_norm": 0.055878885090351105,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.0753,
"step": 82
},
{
"epoch": 0.03146248430429529,
"grad_norm": 0.06015196815133095,
"learning_rate": 1.7096242744495837e-05,
"loss": 0.086,
"step": 83
},
{
"epoch": 0.03184155038025066,
"grad_norm": 0.05178290605545044,
"learning_rate": 1.5195190384357404e-05,
"loss": 0.0793,
"step": 84
},
{
"epoch": 0.032220616456206025,
"grad_norm": 0.05161258578300476,
"learning_rate": 1.339745962155613e-05,
"loss": 0.0809,
"step": 85
},
{
"epoch": 0.03259968253216139,
"grad_norm": 0.05504770204424858,
"learning_rate": 1.1705240714107302e-05,
"loss": 0.0772,
"step": 86
},
{
"epoch": 0.032978748608116754,
"grad_norm": 0.05566380172967911,
"learning_rate": 1.0120595370083318e-05,
"loss": 0.0726,
"step": 87
},
{
"epoch": 0.03335781468407212,
"grad_norm": 0.05389956384897232,
"learning_rate": 8.645454235739903e-06,
"loss": 0.0779,
"step": 88
},
{
"epoch": 0.03373688076002748,
"grad_norm": 0.05445749685168266,
"learning_rate": 7.281614543321269e-06,
"loss": 0.076,
"step": 89
},
{
"epoch": 0.03411594683598285,
"grad_norm": 0.04557236656546593,
"learning_rate": 6.030737921409169e-06,
"loss": 0.075,
"step": 90
},
{
"epoch": 0.03449501291193821,
"grad_norm": 0.050466809421777725,
"learning_rate": 4.8943483704846475e-06,
"loss": 0.0747,
"step": 91
},
{
"epoch": 0.034874078987893575,
"grad_norm": 0.04975885897874832,
"learning_rate": 3.873830406168111e-06,
"loss": 0.0746,
"step": 92
},
{
"epoch": 0.03525314506384894,
"grad_norm": 0.055442556738853455,
"learning_rate": 2.970427372400353e-06,
"loss": 0.0807,
"step": 93
},
{
"epoch": 0.035632211139804304,
"grad_norm": 0.05043969675898552,
"learning_rate": 2.1852399266194314e-06,
"loss": 0.0741,
"step": 94
},
{
"epoch": 0.036011277215759675,
"grad_norm": 0.05411124229431152,
"learning_rate": 1.5192246987791981e-06,
"loss": 0.0741,
"step": 95
},
{
"epoch": 0.03639034329171504,
"grad_norm": 0.06104011833667755,
"learning_rate": 9.731931258429638e-07,
"loss": 0.0822,
"step": 96
},
{
"epoch": 0.036769409367670404,
"grad_norm": 0.05103430524468422,
"learning_rate": 5.478104631726711e-07,
"loss": 0.0716,
"step": 97
},
{
"epoch": 0.03714847544362577,
"grad_norm": 0.057701822370290756,
"learning_rate": 2.4359497401758024e-07,
"loss": 0.0767,
"step": 98
},
{
"epoch": 0.03752754151958113,
"grad_norm": 0.05738105624914169,
"learning_rate": 6.09172980904238e-08,
"loss": 0.0837,
"step": 99
},
{
"epoch": 0.0379066075955365,
"grad_norm": 0.05406734347343445,
"learning_rate": 0.0,
"loss": 0.0856,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.888051858648392e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}