{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.03790570954750059,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007581141909500118,
"grad_norm": 1.77621328830719,
"learning_rate": 2e-05,
"loss": 1.4596,
"step": 1
},
{
"epoch": 0.0015162283819000236,
"grad_norm": 1.7604256868362427,
"learning_rate": 4e-05,
"loss": 1.4375,
"step": 2
},
{
"epoch": 0.0022743425728500356,
"grad_norm": 0.8544202446937561,
"learning_rate": 6e-05,
"loss": 1.3992,
"step": 3
},
{
"epoch": 0.0030324567638000473,
"grad_norm": 0.7290700078010559,
"learning_rate": 8e-05,
"loss": 1.2861,
"step": 4
},
{
"epoch": 0.0037905709547500594,
"grad_norm": 0.6216087341308594,
"learning_rate": 0.0001,
"loss": 1.1906,
"step": 5
},
{
"epoch": 0.004548685145700071,
"grad_norm": 0.6320084929466248,
"learning_rate": 0.00012,
"loss": 1.0423,
"step": 6
},
{
"epoch": 0.005306799336650083,
"grad_norm": 0.620789110660553,
"learning_rate": 0.00014,
"loss": 0.8656,
"step": 7
},
{
"epoch": 0.006064913527600095,
"grad_norm": 0.509087085723877,
"learning_rate": 0.00016,
"loss": 0.6594,
"step": 8
},
{
"epoch": 0.006823027718550106,
"grad_norm": 1.0617144107818604,
"learning_rate": 0.00018,
"loss": 0.5612,
"step": 9
},
{
"epoch": 0.007581141909500119,
"grad_norm": 0.43339547514915466,
"learning_rate": 0.0002,
"loss": 0.3805,
"step": 10
},
{
"epoch": 0.00833925610045013,
"grad_norm": 0.4575250744819641,
"learning_rate": 0.0001999390827019096,
"loss": 0.3066,
"step": 11
},
{
"epoch": 0.009097370291400142,
"grad_norm": 0.3520224392414093,
"learning_rate": 0.00019975640502598244,
"loss": 0.2506,
"step": 12
},
{
"epoch": 0.009855484482350154,
"grad_norm": 0.13390159606933594,
"learning_rate": 0.00019945218953682734,
"loss": 0.2106,
"step": 13
},
{
"epoch": 0.010613598673300166,
"grad_norm": 0.13238006830215454,
"learning_rate": 0.00019902680687415705,
"loss": 0.2027,
"step": 14
},
{
"epoch": 0.011371712864250177,
"grad_norm": 0.1745949536561966,
"learning_rate": 0.00019848077530122083,
"loss": 0.178,
"step": 15
},
{
"epoch": 0.01212982705520019,
"grad_norm": 0.1583506315946579,
"learning_rate": 0.00019781476007338058,
"loss": 0.1865,
"step": 16
},
{
"epoch": 0.012887941246150201,
"grad_norm": 0.10310098528862,
"learning_rate": 0.00019702957262759965,
"loss": 0.1722,
"step": 17
},
{
"epoch": 0.013646055437100213,
"grad_norm": 0.08868539333343506,
"learning_rate": 0.0001961261695938319,
"loss": 0.1685,
"step": 18
},
{
"epoch": 0.014404169628050224,
"grad_norm": 0.06325000524520874,
"learning_rate": 0.00019510565162951537,
"loss": 0.1596,
"step": 19
},
{
"epoch": 0.015162283819000238,
"grad_norm": 0.07059483975172043,
"learning_rate": 0.00019396926207859084,
"loss": 0.1617,
"step": 20
},
{
"epoch": 0.015920398009950248,
"grad_norm": 0.067365363240242,
"learning_rate": 0.00019271838545667876,
"loss": 0.1517,
"step": 21
},
{
"epoch": 0.01667851220090026,
"grad_norm": 0.07025124877691269,
"learning_rate": 0.0001913545457642601,
"loss": 0.1639,
"step": 22
},
{
"epoch": 0.01743662639185027,
"grad_norm": 0.06402155756950378,
"learning_rate": 0.0001898794046299167,
"loss": 0.1505,
"step": 23
},
{
"epoch": 0.018194740582800285,
"grad_norm": 0.074811652302742,
"learning_rate": 0.00018829475928589271,
"loss": 0.1481,
"step": 24
},
{
"epoch": 0.018952854773750295,
"grad_norm": 0.07151901721954346,
"learning_rate": 0.00018660254037844388,
"loss": 0.1595,
"step": 25
},
{
"epoch": 0.019710968964700308,
"grad_norm": 0.06124258041381836,
"learning_rate": 0.0001848048096156426,
"loss": 0.146,
"step": 26
},
{
"epoch": 0.02046908315565032,
"grad_norm": 0.06529385596513748,
"learning_rate": 0.00018290375725550417,
"loss": 0.1358,
"step": 27
},
{
"epoch": 0.02122719734660033,
"grad_norm": 0.060589537024497986,
"learning_rate": 0.00018090169943749476,
"loss": 0.1424,
"step": 28
},
{
"epoch": 0.021985311537550345,
"grad_norm": 0.06462966650724411,
"learning_rate": 0.00017880107536067218,
"loss": 0.1393,
"step": 29
},
{
"epoch": 0.022743425728500355,
"grad_norm": 0.06316369026899338,
"learning_rate": 0.0001766044443118978,
"loss": 0.1221,
"step": 30
},
{
"epoch": 0.02350153991945037,
"grad_norm": 0.06809717416763306,
"learning_rate": 0.00017431448254773944,
"loss": 0.1295,
"step": 31
},
{
"epoch": 0.02425965411040038,
"grad_norm": 0.055948369204998016,
"learning_rate": 0.0001719339800338651,
"loss": 0.1316,
"step": 32
},
{
"epoch": 0.025017768301350392,
"grad_norm": 0.06291463971138,
"learning_rate": 0.00016946583704589973,
"loss": 0.113,
"step": 33
},
{
"epoch": 0.025775882492300402,
"grad_norm": 0.06223299354314804,
"learning_rate": 0.00016691306063588583,
"loss": 0.1208,
"step": 34
},
{
"epoch": 0.026533996683250415,
"grad_norm": 0.06080786883831024,
"learning_rate": 0.00016427876096865394,
"loss": 0.1167,
"step": 35
},
{
"epoch": 0.027292110874200425,
"grad_norm": 0.052656594663858414,
"learning_rate": 0.0001615661475325658,
"loss": 0.1205,
"step": 36
},
{
"epoch": 0.02805022506515044,
"grad_norm": 0.05521203577518463,
"learning_rate": 0.00015877852522924732,
"loss": 0.1292,
"step": 37
},
{
"epoch": 0.02880833925610045,
"grad_norm": 0.0701892152428627,
"learning_rate": 0.0001559192903470747,
"loss": 0.1186,
"step": 38
},
{
"epoch": 0.029566453447050462,
"grad_norm": 0.06418278068304062,
"learning_rate": 0.0001529919264233205,
"loss": 0.1193,
"step": 39
},
{
"epoch": 0.030324567638000476,
"grad_norm": 0.0714295357465744,
"learning_rate": 0.00015000000000000001,
"loss": 0.1225,
"step": 40
},
{
"epoch": 0.031082681828950486,
"grad_norm": 0.05452275648713112,
"learning_rate": 0.00014694715627858908,
"loss": 0.1093,
"step": 41
},
{
"epoch": 0.031840796019900496,
"grad_norm": 0.08698414266109467,
"learning_rate": 0.00014383711467890774,
"loss": 0.1107,
"step": 42
},
{
"epoch": 0.03259891021085051,
"grad_norm": 0.07267863303422928,
"learning_rate": 0.00014067366430758004,
"loss": 0.1195,
"step": 43
},
{
"epoch": 0.03335702440180052,
"grad_norm": 0.07162914425134659,
"learning_rate": 0.00013746065934159123,
"loss": 0.1146,
"step": 44
},
{
"epoch": 0.03411513859275053,
"grad_norm": 0.05877028778195381,
"learning_rate": 0.00013420201433256689,
"loss": 0.1096,
"step": 45
},
{
"epoch": 0.03487325278370054,
"grad_norm": 0.0560929961502552,
"learning_rate": 0.00013090169943749476,
"loss": 0.1202,
"step": 46
},
{
"epoch": 0.03563136697465056,
"grad_norm": 0.07029124349355698,
"learning_rate": 0.0001275637355816999,
"loss": 0.1029,
"step": 47
},
{
"epoch": 0.03638948116560057,
"grad_norm": 0.043535735458135605,
"learning_rate": 0.00012419218955996676,
"loss": 0.1079,
"step": 48
},
{
"epoch": 0.03714759535655058,
"grad_norm": 0.05880381911993027,
"learning_rate": 0.00012079116908177593,
"loss": 0.1223,
"step": 49
},
{
"epoch": 0.03790570954750059,
"grad_norm": 0.05926327407360077,
"learning_rate": 0.00011736481776669306,
"loss": 0.116,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.812811709692314e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}