{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.64,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": 2.568751573562622,
"learning_rate": 2e-05,
"loss": 2.1237,
"step": 1
},
{
"epoch": 0.0128,
"grad_norm": 2.210172414779663,
"learning_rate": 4e-05,
"loss": 1.9939,
"step": 2
},
{
"epoch": 0.0192,
"grad_norm": 2.24556040763855,
"learning_rate": 6e-05,
"loss": 1.9278,
"step": 3
},
{
"epoch": 0.0256,
"grad_norm": 1.4301820993423462,
"learning_rate": 8e-05,
"loss": 1.4586,
"step": 4
},
{
"epoch": 0.032,
"grad_norm": 1.2345417737960815,
"learning_rate": 0.0001,
"loss": 1.2717,
"step": 5
},
{
"epoch": 0.0384,
"grad_norm": 1.1178274154663086,
"learning_rate": 0.00012,
"loss": 0.973,
"step": 6
},
{
"epoch": 0.0448,
"grad_norm": 0.8991392850875854,
"learning_rate": 0.00014,
"loss": 0.6798,
"step": 7
},
{
"epoch": 0.0512,
"grad_norm": 1.3577145338058472,
"learning_rate": 0.00016,
"loss": 0.7061,
"step": 8
},
{
"epoch": 0.0576,
"grad_norm": 0.7793099284172058,
"learning_rate": 0.00018,
"loss": 0.4772,
"step": 9
},
{
"epoch": 0.064,
"grad_norm": 0.4328145682811737,
"learning_rate": 0.0002,
"loss": 0.4894,
"step": 10
},
{
"epoch": 0.0704,
"grad_norm": 0.33954325318336487,
"learning_rate": 0.0001999390827019096,
"loss": 0.4181,
"step": 11
},
{
"epoch": 0.0768,
"grad_norm": 0.26666146516799927,
"learning_rate": 0.00019975640502598244,
"loss": 0.3721,
"step": 12
},
{
"epoch": 0.0832,
"grad_norm": 0.2060694545507431,
"learning_rate": 0.00019945218953682734,
"loss": 0.4174,
"step": 13
},
{
"epoch": 0.0896,
"grad_norm": 0.2562263607978821,
"learning_rate": 0.00019902680687415705,
"loss": 0.4412,
"step": 14
},
{
"epoch": 0.096,
"grad_norm": 0.22718767821788788,
"learning_rate": 0.00019848077530122083,
"loss": 0.3749,
"step": 15
},
{
"epoch": 0.1024,
"grad_norm": 0.3134597837924957,
"learning_rate": 0.00019781476007338058,
"loss": 0.3291,
"step": 16
},
{
"epoch": 0.1088,
"grad_norm": 0.24586430191993713,
"learning_rate": 0.00019702957262759965,
"loss": 0.2975,
"step": 17
},
{
"epoch": 0.1152,
"grad_norm": 0.33068349957466125,
"learning_rate": 0.0001961261695938319,
"loss": 0.3777,
"step": 18
},
{
"epoch": 0.1216,
"grad_norm": 0.18534499406814575,
"learning_rate": 0.00019510565162951537,
"loss": 0.302,
"step": 19
},
{
"epoch": 0.128,
"grad_norm": 0.21168865263462067,
"learning_rate": 0.00019396926207859084,
"loss": 0.3034,
"step": 20
},
{
"epoch": 0.1344,
"grad_norm": 0.1708526760339737,
"learning_rate": 0.00019271838545667876,
"loss": 0.331,
"step": 21
},
{
"epoch": 0.1408,
"grad_norm": 0.1510276347398758,
"learning_rate": 0.0001913545457642601,
"loss": 0.3169,
"step": 22
},
{
"epoch": 0.1472,
"grad_norm": 0.16446498036384583,
"learning_rate": 0.0001898794046299167,
"loss": 0.319,
"step": 23
},
{
"epoch": 0.1536,
"grad_norm": 0.29390275478363037,
"learning_rate": 0.00018829475928589271,
"loss": 0.2821,
"step": 24
},
{
"epoch": 0.16,
"grad_norm": 0.13030974566936493,
"learning_rate": 0.00018660254037844388,
"loss": 0.256,
"step": 25
},
{
"epoch": 0.1664,
"grad_norm": 0.12436527013778687,
"learning_rate": 0.0001848048096156426,
"loss": 0.3123,
"step": 26
},
{
"epoch": 0.1728,
"grad_norm": 0.16936242580413818,
"learning_rate": 0.00018290375725550417,
"loss": 0.262,
"step": 27
},
{
"epoch": 0.1792,
"grad_norm": 0.14882561564445496,
"learning_rate": 0.00018090169943749476,
"loss": 0.2825,
"step": 28
},
{
"epoch": 0.1856,
"grad_norm": 0.12063440680503845,
"learning_rate": 0.00017880107536067218,
"loss": 0.2362,
"step": 29
},
{
"epoch": 0.192,
"grad_norm": 0.12142356485128403,
"learning_rate": 0.0001766044443118978,
"loss": 0.2745,
"step": 30
},
{
"epoch": 0.1984,
"grad_norm": 0.115916408598423,
"learning_rate": 0.00017431448254773944,
"loss": 0.2727,
"step": 31
},
{
"epoch": 0.2048,
"grad_norm": 0.11815212666988373,
"learning_rate": 0.0001719339800338651,
"loss": 0.2276,
"step": 32
},
{
"epoch": 0.2112,
"grad_norm": 0.10431115329265594,
"learning_rate": 0.00016946583704589973,
"loss": 0.2662,
"step": 33
},
{
"epoch": 0.2176,
"grad_norm": 0.0971333459019661,
"learning_rate": 0.00016691306063588583,
"loss": 0.251,
"step": 34
},
{
"epoch": 0.224,
"grad_norm": 0.11603528261184692,
"learning_rate": 0.00016427876096865394,
"loss": 0.2405,
"step": 35
},
{
"epoch": 0.2304,
"grad_norm": 0.10909801721572876,
"learning_rate": 0.0001615661475325658,
"loss": 0.2515,
"step": 36
},
{
"epoch": 0.2368,
"grad_norm": 0.08744729310274124,
"learning_rate": 0.00015877852522924732,
"loss": 0.2748,
"step": 37
},
{
"epoch": 0.2432,
"grad_norm": 0.10513755679130554,
"learning_rate": 0.0001559192903470747,
"loss": 0.2292,
"step": 38
},
{
"epoch": 0.2496,
"grad_norm": 0.10443545877933502,
"learning_rate": 0.0001529919264233205,
"loss": 0.2688,
"step": 39
},
{
"epoch": 0.256,
"grad_norm": 0.10798890888690948,
"learning_rate": 0.00015000000000000001,
"loss": 0.2578,
"step": 40
},
{
"epoch": 0.2624,
"grad_norm": 0.10528898984193802,
"learning_rate": 0.00014694715627858908,
"loss": 0.266,
"step": 41
},
{
"epoch": 0.2688,
"grad_norm": 0.09310728311538696,
"learning_rate": 0.00014383711467890774,
"loss": 0.2059,
"step": 42
},
{
"epoch": 0.2752,
"grad_norm": 0.09954522550106049,
"learning_rate": 0.00014067366430758004,
"loss": 0.2992,
"step": 43
},
{
"epoch": 0.2816,
"grad_norm": 0.10648441314697266,
"learning_rate": 0.00013746065934159123,
"loss": 0.259,
"step": 44
},
{
"epoch": 0.288,
"grad_norm": 0.12005575746297836,
"learning_rate": 0.00013420201433256689,
"loss": 0.2654,
"step": 45
},
{
"epoch": 0.2944,
"grad_norm": 0.09197583049535751,
"learning_rate": 0.00013090169943749476,
"loss": 0.2088,
"step": 46
},
{
"epoch": 0.3008,
"grad_norm": 0.0905015841126442,
"learning_rate": 0.0001275637355816999,
"loss": 0.2659,
"step": 47
},
{
"epoch": 0.3072,
"grad_norm": 0.10011676698923111,
"learning_rate": 0.00012419218955996676,
"loss": 0.2573,
"step": 48
},
{
"epoch": 0.3136,
"grad_norm": 0.11158254742622375,
"learning_rate": 0.00012079116908177593,
"loss": 0.2734,
"step": 49
},
{
"epoch": 0.32,
"grad_norm": 0.10176081955432892,
"learning_rate": 0.00011736481776669306,
"loss": 0.2386,
"step": 50
},
{
"epoch": 0.3264,
"grad_norm": 0.1110386997461319,
"learning_rate": 0.00011391731009600654,
"loss": 0.2862,
"step": 51
},
{
"epoch": 0.3328,
"grad_norm": 0.11058996617794037,
"learning_rate": 0.00011045284632676536,
"loss": 0.3214,
"step": 52
},
{
"epoch": 0.3392,
"grad_norm": 0.09535147994756699,
"learning_rate": 0.00010697564737441252,
"loss": 0.267,
"step": 53
},
{
"epoch": 0.3456,
"grad_norm": 0.09183581173419952,
"learning_rate": 0.00010348994967025012,
"loss": 0.2545,
"step": 54
},
{
"epoch": 0.352,
"grad_norm": 0.11743531376123428,
"learning_rate": 0.0001,
"loss": 0.2664,
"step": 55
},
{
"epoch": 0.3584,
"grad_norm": 0.1046270951628685,
"learning_rate": 9.651005032974994e-05,
"loss": 0.2395,
"step": 56
},
{
"epoch": 0.3648,
"grad_norm": 0.11286702007055283,
"learning_rate": 9.302435262558747e-05,
"loss": 0.2463,
"step": 57
},
{
"epoch": 0.3712,
"grad_norm": 0.12298794090747833,
"learning_rate": 8.954715367323468e-05,
"loss": 0.254,
"step": 58
},
{
"epoch": 0.3776,
"grad_norm": 0.1068972498178482,
"learning_rate": 8.608268990399349e-05,
"loss": 0.2714,
"step": 59
},
{
"epoch": 0.384,
"grad_norm": 0.11853048950433731,
"learning_rate": 8.263518223330697e-05,
"loss": 0.2367,
"step": 60
},
{
"epoch": 0.3904,
"grad_norm": 0.0922948494553566,
"learning_rate": 7.920883091822408e-05,
"loss": 0.2219,
"step": 61
},
{
"epoch": 0.3968,
"grad_norm": 0.1000463217496872,
"learning_rate": 7.580781044003324e-05,
"loss": 0.2424,
"step": 62
},
{
"epoch": 0.4032,
"grad_norm": 0.0933864563703537,
"learning_rate": 7.243626441830009e-05,
"loss": 0.2304,
"step": 63
},
{
"epoch": 0.4096,
"grad_norm": 0.10009445250034332,
"learning_rate": 6.909830056250527e-05,
"loss": 0.2183,
"step": 64
},
{
"epoch": 0.416,
"grad_norm": 0.10700756311416626,
"learning_rate": 6.579798566743314e-05,
"loss": 0.2861,
"step": 65
},
{
"epoch": 0.4224,
"grad_norm": 0.08552579581737518,
"learning_rate": 6.25393406584088e-05,
"loss": 0.2508,
"step": 66
},
{
"epoch": 0.4288,
"grad_norm": 0.08115057647228241,
"learning_rate": 5.9326335692419995e-05,
"loss": 0.28,
"step": 67
},
{
"epoch": 0.4352,
"grad_norm": 0.09101542085409164,
"learning_rate": 5.616288532109225e-05,
"loss": 0.2905,
"step": 68
},
{
"epoch": 0.4416,
"grad_norm": 0.08541923761367798,
"learning_rate": 5.305284372141095e-05,
"loss": 0.2369,
"step": 69
},
{
"epoch": 0.448,
"grad_norm": 0.08494503796100616,
"learning_rate": 5.000000000000002e-05,
"loss": 0.2535,
"step": 70
},
{
"epoch": 0.4544,
"grad_norm": 0.09222583472728729,
"learning_rate": 4.700807357667952e-05,
"loss": 0.2164,
"step": 71
},
{
"epoch": 0.4608,
"grad_norm": 0.09007549285888672,
"learning_rate": 4.4080709652925336e-05,
"loss": 0.2911,
"step": 72
},
{
"epoch": 0.4672,
"grad_norm": 0.10070206969976425,
"learning_rate": 4.12214747707527e-05,
"loss": 0.2687,
"step": 73
},
{
"epoch": 0.4736,
"grad_norm": 0.11217208206653595,
"learning_rate": 3.843385246743417e-05,
"loss": 0.2376,
"step": 74
},
{
"epoch": 0.48,
"grad_norm": 0.0864848643541336,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.2826,
"step": 75
},
{
"epoch": 0.4864,
"grad_norm": 0.08239398896694183,
"learning_rate": 3.308693936411421e-05,
"loss": 0.2344,
"step": 76
},
{
"epoch": 0.4928,
"grad_norm": 0.10607580840587616,
"learning_rate": 3.053416295410026e-05,
"loss": 0.272,
"step": 77
},
{
"epoch": 0.4992,
"grad_norm": 0.11652331054210663,
"learning_rate": 2.8066019966134904e-05,
"loss": 0.2398,
"step": 78
},
{
"epoch": 0.5056,
"grad_norm": 0.08097366243600845,
"learning_rate": 2.5685517452260567e-05,
"loss": 0.2289,
"step": 79
},
{
"epoch": 0.512,
"grad_norm": 0.08761543780565262,
"learning_rate": 2.339555568810221e-05,
"loss": 0.2297,
"step": 80
},
{
"epoch": 0.5184,
"grad_norm": 0.09609895944595337,
"learning_rate": 2.119892463932781e-05,
"loss": 0.2214,
"step": 81
},
{
"epoch": 0.5248,
"grad_norm": 0.10124152898788452,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.2442,
"step": 82
},
{
"epoch": 0.5312,
"grad_norm": 0.08449672907590866,
"learning_rate": 1.7096242744495837e-05,
"loss": 0.257,
"step": 83
},
{
"epoch": 0.5376,
"grad_norm": 0.10185690224170685,
"learning_rate": 1.5195190384357404e-05,
"loss": 0.2331,
"step": 84
},
{
"epoch": 0.544,
"grad_norm": 0.10394009202718735,
"learning_rate": 1.339745962155613e-05,
"loss": 0.2679,
"step": 85
},
{
"epoch": 0.5504,
"grad_norm": 0.10671041160821915,
"learning_rate": 1.1705240714107302e-05,
"loss": 0.2953,
"step": 86
},
{
"epoch": 0.5568,
"grad_norm": 0.09869690239429474,
"learning_rate": 1.0120595370083318e-05,
"loss": 0.2363,
"step": 87
},
{
"epoch": 0.5632,
"grad_norm": 0.10397417098283768,
"learning_rate": 8.645454235739903e-06,
"loss": 0.2221,
"step": 88
},
{
"epoch": 0.5696,
"grad_norm": 0.10206641256809235,
"learning_rate": 7.281614543321269e-06,
"loss": 0.2329,
"step": 89
},
{
"epoch": 0.576,
"grad_norm": 0.09630835801362991,
"learning_rate": 6.030737921409169e-06,
"loss": 0.2193,
"step": 90
},
{
"epoch": 0.5824,
"grad_norm": 0.09056926518678665,
"learning_rate": 4.8943483704846475e-06,
"loss": 0.2507,
"step": 91
},
{
"epoch": 0.5888,
"grad_norm": 0.09456932544708252,
"learning_rate": 3.873830406168111e-06,
"loss": 0.2413,
"step": 92
},
{
"epoch": 0.5952,
"grad_norm": 0.10614177584648132,
"learning_rate": 2.970427372400353e-06,
"loss": 0.2294,
"step": 93
},
{
"epoch": 0.6016,
"grad_norm": 0.11094526201486588,
"learning_rate": 2.1852399266194314e-06,
"loss": 0.2282,
"step": 94
},
{
"epoch": 0.608,
"grad_norm": 0.10783498734235764,
"learning_rate": 1.5192246987791981e-06,
"loss": 0.2099,
"step": 95
},
{
"epoch": 0.6144,
"grad_norm": 0.12005529552698135,
"learning_rate": 9.731931258429638e-07,
"loss": 0.2289,
"step": 96
},
{
"epoch": 0.6208,
"grad_norm": 0.09397509694099426,
"learning_rate": 5.478104631726711e-07,
"loss": 0.2478,
"step": 97
},
{
"epoch": 0.6272,
"grad_norm": 0.08096156269311905,
"learning_rate": 2.4359497401758024e-07,
"loss": 0.2515,
"step": 98
},
{
"epoch": 0.6336,
"grad_norm": 0.0987912192940712,
"learning_rate": 6.09172980904238e-08,
"loss": 0.2299,
"step": 99
},
{
"epoch": 0.64,
"grad_norm": 0.10416976362466812,
"learning_rate": 0.0,
"loss": 0.1932,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.92661873012351e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}