{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01895330379776825,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000379066075955365,
"grad_norm": 2.788883924484253,
"learning_rate": 2e-05,
"loss": 1.4892,
"step": 1
},
{
"epoch": 0.00075813215191073,
"grad_norm": 2.803041934967041,
"learning_rate": 4e-05,
"loss": 1.4967,
"step": 2
},
{
"epoch": 0.001137198227866095,
"grad_norm": 2.5324907302856445,
"learning_rate": 6e-05,
"loss": 1.3553,
"step": 3
},
{
"epoch": 0.00151626430382146,
"grad_norm": 1.693648099899292,
"learning_rate": 8e-05,
"loss": 0.983,
"step": 4
},
{
"epoch": 0.0018953303797768248,
"grad_norm": 1.2373262643814087,
"learning_rate": 0.0001,
"loss": 0.6743,
"step": 5
},
{
"epoch": 0.00227439645573219,
"grad_norm": 0.9896576404571533,
"learning_rate": 0.00012,
"loss": 0.4505,
"step": 6
},
{
"epoch": 0.0026534625316875548,
"grad_norm": 1.0764743089675903,
"learning_rate": 0.00014,
"loss": 0.3412,
"step": 7
},
{
"epoch": 0.00303252860764292,
"grad_norm": 0.4975992739200592,
"learning_rate": 0.00016,
"loss": 0.2131,
"step": 8
},
{
"epoch": 0.0034115946835982847,
"grad_norm": 0.3490230143070221,
"learning_rate": 0.00018,
"loss": 0.1971,
"step": 9
},
{
"epoch": 0.0037906607595536495,
"grad_norm": 0.2667368948459625,
"learning_rate": 0.0002,
"loss": 0.1833,
"step": 10
},
{
"epoch": 0.004169726835509015,
"grad_norm": 0.25213170051574707,
"learning_rate": 0.0001999390827019096,
"loss": 0.1652,
"step": 11
},
{
"epoch": 0.00454879291146438,
"grad_norm": 0.18969492614269257,
"learning_rate": 0.00019975640502598244,
"loss": 0.1606,
"step": 12
},
{
"epoch": 0.004927858987419744,
"grad_norm": 0.15731994807720184,
"learning_rate": 0.00019945218953682734,
"loss": 0.152,
"step": 13
},
{
"epoch": 0.0053069250633751095,
"grad_norm": 0.14271649718284607,
"learning_rate": 0.00019902680687415705,
"loss": 0.1435,
"step": 14
},
{
"epoch": 0.005685991139330475,
"grad_norm": 0.16122481226921082,
"learning_rate": 0.00019848077530122083,
"loss": 0.1609,
"step": 15
},
{
"epoch": 0.00606505721528584,
"grad_norm": 0.15835386514663696,
"learning_rate": 0.00019781476007338058,
"loss": 0.1444,
"step": 16
},
{
"epoch": 0.006444123291241204,
"grad_norm": 0.11050277203321457,
"learning_rate": 0.00019702957262759965,
"loss": 0.128,
"step": 17
},
{
"epoch": 0.0068231893671965695,
"grad_norm": 0.12347878515720367,
"learning_rate": 0.0001961261695938319,
"loss": 0.1326,
"step": 18
},
{
"epoch": 0.007202255443151935,
"grad_norm": 0.09975384920835495,
"learning_rate": 0.00019510565162951537,
"loss": 0.1187,
"step": 19
},
{
"epoch": 0.007581321519107299,
"grad_norm": 0.1068282350897789,
"learning_rate": 0.00019396926207859084,
"loss": 0.1202,
"step": 20
},
{
"epoch": 0.007960387595062665,
"grad_norm": 0.1021302118897438,
"learning_rate": 0.00019271838545667876,
"loss": 0.1297,
"step": 21
},
{
"epoch": 0.00833945367101803,
"grad_norm": 0.09192782640457153,
"learning_rate": 0.0001913545457642601,
"loss": 0.1227,
"step": 22
},
{
"epoch": 0.008718519746973394,
"grad_norm": 0.07424570620059967,
"learning_rate": 0.0001898794046299167,
"loss": 0.1104,
"step": 23
},
{
"epoch": 0.00909758582292876,
"grad_norm": 0.09622485190629959,
"learning_rate": 0.00018829475928589271,
"loss": 0.1279,
"step": 24
},
{
"epoch": 0.009476651898884124,
"grad_norm": 0.08250657469034195,
"learning_rate": 0.00018660254037844388,
"loss": 0.1152,
"step": 25
},
{
"epoch": 0.009855717974839489,
"grad_norm": 0.09128892421722412,
"learning_rate": 0.0001848048096156426,
"loss": 0.1072,
"step": 26
},
{
"epoch": 0.010234784050794855,
"grad_norm": 0.11655224114656448,
"learning_rate": 0.00018290375725550417,
"loss": 0.1156,
"step": 27
},
{
"epoch": 0.010613850126750219,
"grad_norm": 0.09212372452020645,
"learning_rate": 0.00018090169943749476,
"loss": 0.107,
"step": 28
},
{
"epoch": 0.010992916202705583,
"grad_norm": 0.1035194918513298,
"learning_rate": 0.00017880107536067218,
"loss": 0.1169,
"step": 29
},
{
"epoch": 0.01137198227866095,
"grad_norm": 0.09886885434389114,
"learning_rate": 0.0001766044443118978,
"loss": 0.114,
"step": 30
},
{
"epoch": 0.011751048354616314,
"grad_norm": 0.07283884286880493,
"learning_rate": 0.00017431448254773944,
"loss": 0.0962,
"step": 31
},
{
"epoch": 0.01213011443057168,
"grad_norm": 0.09377507120370865,
"learning_rate": 0.0001719339800338651,
"loss": 0.0925,
"step": 32
},
{
"epoch": 0.012509180506527044,
"grad_norm": 0.09097617864608765,
"learning_rate": 0.00016946583704589973,
"loss": 0.1045,
"step": 33
},
{
"epoch": 0.012888246582482409,
"grad_norm": 0.10535702854394913,
"learning_rate": 0.00016691306063588583,
"loss": 0.1005,
"step": 34
},
{
"epoch": 0.013267312658437775,
"grad_norm": 0.09806367754936218,
"learning_rate": 0.00016427876096865394,
"loss": 0.102,
"step": 35
},
{
"epoch": 0.013646378734393139,
"grad_norm": 0.09789954870939255,
"learning_rate": 0.0001615661475325658,
"loss": 0.0955,
"step": 36
},
{
"epoch": 0.014025444810348503,
"grad_norm": 0.1131892129778862,
"learning_rate": 0.00015877852522924732,
"loss": 0.0853,
"step": 37
},
{
"epoch": 0.01440451088630387,
"grad_norm": 0.08628207445144653,
"learning_rate": 0.0001559192903470747,
"loss": 0.0901,
"step": 38
},
{
"epoch": 0.014783576962259234,
"grad_norm": 0.11414898931980133,
"learning_rate": 0.0001529919264233205,
"loss": 0.0993,
"step": 39
},
{
"epoch": 0.015162643038214598,
"grad_norm": 0.07424012571573257,
"learning_rate": 0.00015000000000000001,
"loss": 0.1037,
"step": 40
},
{
"epoch": 0.015541709114169964,
"grad_norm": 0.08380600810050964,
"learning_rate": 0.00014694715627858908,
"loss": 0.0839,
"step": 41
},
{
"epoch": 0.01592077519012533,
"grad_norm": 0.06833647936582565,
"learning_rate": 0.00014383711467890774,
"loss": 0.0916,
"step": 42
},
{
"epoch": 0.016299841266080695,
"grad_norm": 0.06410624086856842,
"learning_rate": 0.00014067366430758004,
"loss": 0.0845,
"step": 43
},
{
"epoch": 0.01667890734203606,
"grad_norm": 0.06120501831173897,
"learning_rate": 0.00013746065934159123,
"loss": 0.0812,
"step": 44
},
{
"epoch": 0.017057973417991423,
"grad_norm": 0.06658154726028442,
"learning_rate": 0.00013420201433256689,
"loss": 0.0868,
"step": 45
},
{
"epoch": 0.017437039493946788,
"grad_norm": 0.07054657489061356,
"learning_rate": 0.00013090169943749476,
"loss": 0.0885,
"step": 46
},
{
"epoch": 0.017816105569902152,
"grad_norm": 0.05874604731798172,
"learning_rate": 0.0001275637355816999,
"loss": 0.0845,
"step": 47
},
{
"epoch": 0.01819517164585752,
"grad_norm": 0.07008881121873856,
"learning_rate": 0.00012419218955996676,
"loss": 0.0775,
"step": 48
},
{
"epoch": 0.018574237721812884,
"grad_norm": 0.07047414779663086,
"learning_rate": 0.00012079116908177593,
"loss": 0.0839,
"step": 49
},
{
"epoch": 0.01895330379776825,
"grad_norm": 0.07158780097961426,
"learning_rate": 0.00011736481776669306,
"loss": 0.0993,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.9437929254121472e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}