{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.8044243338360986, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01608848667672197, "grad_norm": 6.964079856872559, "learning_rate": 4e-05, "loss": 2.7497, "step": 20 }, { "epoch": 0.03217697335344394, "grad_norm": 2.735353946685791, "learning_rate": 8e-05, "loss": 1.4988, "step": 40 }, { "epoch": 0.048265460030165915, "grad_norm": 2.258044481277466, "learning_rate": 0.00012, "loss": 1.3571, "step": 60 }, { "epoch": 0.06435394670688788, "grad_norm": 2.981090784072876, "learning_rate": 0.00016, "loss": 1.3767, "step": 80 }, { "epoch": 0.08044243338360986, "grad_norm": 2.452106475830078, "learning_rate": 0.0002, "loss": 1.309, "step": 100 }, { "epoch": 0.09653092006033183, "grad_norm": 2.4377968311309814, "learning_rate": 0.00019650043744531934, "loss": 1.3168, "step": 120 }, { "epoch": 0.1126194067370538, "grad_norm": 2.38288950920105, "learning_rate": 0.00019300087489063867, "loss": 1.3587, "step": 140 }, { "epoch": 0.12870789341377575, "grad_norm": 2.5989363193511963, "learning_rate": 0.000189501312335958, "loss": 1.325, "step": 160 }, { "epoch": 0.14479638009049775, "grad_norm": 2.084688901901245, "learning_rate": 0.00018600174978127736, "loss": 1.3187, "step": 180 }, { "epoch": 0.16088486676721972, "grad_norm": 2.4337613582611084, "learning_rate": 0.0001825021872265967, "loss": 1.2917, "step": 200 }, { "epoch": 0.1769733534439417, "grad_norm": 2.2334115505218506, "learning_rate": 0.00017900262467191602, "loss": 1.279, "step": 220 }, { "epoch": 0.19306184012066366, "grad_norm": 2.3266961574554443, "learning_rate": 0.00017550306211723535, "loss": 1.2932, "step": 240 }, { "epoch": 0.20915032679738563, "grad_norm": 2.333585500717163, "learning_rate": 0.00017200349956255468, "loss": 1.2868, "step": 260 }, { "epoch": 0.2252388134741076, "grad_norm": 2.11601185798645, "learning_rate": 0.000168503937007874, "loss": 1.2997, "step": 280 }, { "epoch": 0.24132730015082957, "grad_norm": 2.2596933841705322, "learning_rate": 0.00016500437445319337, "loss": 1.2587, "step": 300 }, { "epoch": 0.2574157868275515, "grad_norm": 2.280146598815918, "learning_rate": 0.0001615048118985127, "loss": 1.2571, "step": 320 }, { "epoch": 0.27350427350427353, "grad_norm": 2.0850765705108643, "learning_rate": 0.00015800524934383203, "loss": 1.2706, "step": 340 }, { "epoch": 0.2895927601809955, "grad_norm": 1.557936668395996, "learning_rate": 0.00015450568678915136, "loss": 1.2334, "step": 360 }, { "epoch": 0.30568124685771747, "grad_norm": 1.4320595264434814, "learning_rate": 0.0001510061242344707, "loss": 1.2243, "step": 380 }, { "epoch": 0.32176973353443944, "grad_norm": 2.060842514038086, "learning_rate": 0.00014750656167979002, "loss": 1.2652, "step": 400 }, { "epoch": 0.3378582202111614, "grad_norm": 1.5861530303955078, "learning_rate": 0.00014400699912510938, "loss": 1.1902, "step": 420 }, { "epoch": 0.3539467068878834, "grad_norm": 2.1270854473114014, "learning_rate": 0.0001405074365704287, "loss": 1.2269, "step": 440 }, { "epoch": 0.37003519356460535, "grad_norm": 1.6317099332809448, "learning_rate": 0.00013700787401574804, "loss": 1.1482, "step": 460 }, { "epoch": 0.3861236802413273, "grad_norm": 1.7842175960540771, "learning_rate": 0.00013350831146106737, "loss": 1.2013, "step": 480 }, { "epoch": 0.4022121669180493, "grad_norm": 2.0094027519226074, "learning_rate": 0.0001300087489063867, "loss": 1.1789, "step": 500 }, { "epoch": 
0.41830065359477125, "grad_norm": 1.899143934249878, "learning_rate": 0.00012650918635170603, "loss": 1.1602, "step": 520 }, { "epoch": 0.4343891402714932, "grad_norm": 2.16550350189209, "learning_rate": 0.0001230096237970254, "loss": 1.1804, "step": 540 }, { "epoch": 0.4504776269482152, "grad_norm": 1.6540337800979614, "learning_rate": 0.00011951006124234472, "loss": 1.202, "step": 560 }, { "epoch": 0.46656611362493716, "grad_norm": 2.159453868865967, "learning_rate": 0.00011601049868766405, "loss": 1.1415, "step": 580 }, { "epoch": 0.48265460030165913, "grad_norm": 1.7094770669937134, "learning_rate": 0.00011251093613298338, "loss": 1.177, "step": 600 }, { "epoch": 0.4987430869783811, "grad_norm": 1.721779227256775, "learning_rate": 0.00010901137357830271, "loss": 1.1343, "step": 620 }, { "epoch": 0.514831573655103, "grad_norm": 1.6354072093963623, "learning_rate": 0.00010551181102362204, "loss": 1.1501, "step": 640 }, { "epoch": 0.530920060331825, "grad_norm": 1.8426846265792847, "learning_rate": 0.0001020122484689414, "loss": 1.095, "step": 660 }, { "epoch": 0.5470085470085471, "grad_norm": 1.441419005393982, "learning_rate": 9.851268591426073e-05, "loss": 1.1536, "step": 680 }, { "epoch": 0.563097033685269, "grad_norm": 2.076209545135498, "learning_rate": 9.501312335958006e-05, "loss": 1.0939, "step": 700 }, { "epoch": 0.579185520361991, "grad_norm": 1.6805191040039062, "learning_rate": 9.151356080489939e-05, "loss": 1.1148, "step": 720 }, { "epoch": 0.5952740070387129, "grad_norm": 1.650302767753601, "learning_rate": 8.801399825021873e-05, "loss": 1.1416, "step": 740 }, { "epoch": 0.6113624937154349, "grad_norm": 1.9106342792510986, "learning_rate": 8.451443569553806e-05, "loss": 1.1169, "step": 760 }, { "epoch": 0.6274509803921569, "grad_norm": 1.8341450691223145, "learning_rate": 8.10148731408574e-05, "loss": 1.0639, "step": 780 }, { "epoch": 0.6435394670688789, "grad_norm": 1.4700909852981567, "learning_rate": 7.751531058617674e-05, "loss": 1.0996, "step": 800 }, { "epoch": 0.6596279537456008, "grad_norm": 1.8903424739837646, "learning_rate": 7.401574803149607e-05, "loss": 1.1005, "step": 820 }, { "epoch": 0.6757164404223228, "grad_norm": 2.014308214187622, "learning_rate": 7.05161854768154e-05, "loss": 1.0935, "step": 840 }, { "epoch": 0.6918049270990447, "grad_norm": 1.742974877357483, "learning_rate": 6.701662292213474e-05, "loss": 1.089, "step": 860 }, { "epoch": 0.7078934137757668, "grad_norm": 1.8661373853683472, "learning_rate": 6.351706036745407e-05, "loss": 1.1048, "step": 880 }, { "epoch": 0.7239819004524887, "grad_norm": 1.6748634576797485, "learning_rate": 6.00174978127734e-05, "loss": 1.0903, "step": 900 }, { "epoch": 0.7400703871292107, "grad_norm": 1.7615855932235718, "learning_rate": 5.6517935258092734e-05, "loss": 1.0356, "step": 920 }, { "epoch": 0.7561588738059326, "grad_norm": 1.7509955167770386, "learning_rate": 5.301837270341208e-05, "loss": 1.0446, "step": 940 }, { "epoch": 0.7722473604826546, "grad_norm": 1.871839165687561, "learning_rate": 4.951881014873141e-05, "loss": 1.0521, "step": 960 }, { "epoch": 0.7883358471593765, "grad_norm": 1.7496371269226074, "learning_rate": 4.6019247594050745e-05, "loss": 1.0813, "step": 980 }, { "epoch": 0.8044243338360986, "grad_norm": 1.7232415676116943, "learning_rate": 4.251968503937008e-05, "loss": 1.0358, "step": 1000 } ], "logging_steps": 20, "max_steps": 1243, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, 
"should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 5.363282340552499e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }