{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 273, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 1.609375, "learning_rate": 2e-05, "loss": 0.6806, "step": 1 }, { "epoch": 0.01, "grad_norm": 1.703125, "learning_rate": 4e-05, "loss": 0.4976, "step": 2 }, { "epoch": 0.01, "grad_norm": 1.671875, "learning_rate": 6e-05, "loss": 0.811, "step": 3 }, { "epoch": 0.01, "grad_norm": 1.8203125, "learning_rate": 8e-05, "loss": 0.7326, "step": 4 }, { "epoch": 0.02, "grad_norm": 1.046875, "learning_rate": 0.0001, "loss": 0.4289, "step": 5 }, { "epoch": 0.02, "grad_norm": 0.296875, "learning_rate": 0.00012, "loss": 0.0229, "step": 6 }, { "epoch": 0.03, "grad_norm": 1.15625, "learning_rate": 0.00014, "loss": 0.2144, "step": 7 }, { "epoch": 0.03, "grad_norm": 1.40625, "learning_rate": 0.00016, "loss": 0.2301, "step": 8 }, { "epoch": 0.03, "grad_norm": 1.4453125, "learning_rate": 0.00018, "loss": 0.2772, "step": 9 }, { "epoch": 0.04, "grad_norm": 0.51171875, "learning_rate": 0.0002, "loss": 0.1217, "step": 10 }, { "epoch": 0.04, "grad_norm": 0.90625, "learning_rate": 0.00019999286567172776, "loss": 0.283, "step": 11 }, { "epoch": 0.04, "grad_norm": 0.376953125, "learning_rate": 0.00019997146370488383, "loss": 0.181, "step": 12 }, { "epoch": 0.05, "grad_norm": 0.216796875, "learning_rate": 0.00019993579715324135, "loss": 0.1375, "step": 13 }, { "epoch": 0.05, "grad_norm": 0.326171875, "learning_rate": 0.00019988587110593808, "loss": 0.1605, "step": 14 }, { "epoch": 0.05, "grad_norm": 0.1708984375, "learning_rate": 0.00019982169268675023, "loss": 0.1368, "step": 15 }, { "epoch": 0.06, "grad_norm": 0.1220703125, "learning_rate": 0.00019974327105307604, "loss": 0.1352, "step": 16 }, { "epoch": 0.06, "grad_norm": 0.2255859375, "learning_rate": 0.00019965061739462902, "loss": 0.1368, "step": 17 }, { "epoch": 0.07, "grad_norm": 0.1552734375, "learning_rate": 0.00019954374493184152, "loss": 0.133, "step": 18 }, { "epoch": 0.07, "grad_norm": 0.1552734375, "learning_rate": 0.00019942266891397815, "loss": 0.1212, "step": 19 }, { "epoch": 0.07, "grad_norm": 0.09130859375, "learning_rate": 0.0001992874066169601, "loss": 0.1162, "step": 20 }, { "epoch": 0.08, "grad_norm": 0.1572265625, "learning_rate": 0.00019913797734089997, "loss": 0.1086, "step": 21 }, { "epoch": 0.08, "grad_norm": 0.2294921875, "learning_rate": 0.00019897440240734788, "loss": 0.1138, "step": 22 }, { "epoch": 0.08, "grad_norm": 0.158203125, "learning_rate": 0.00019879670515624936, "loss": 0.1119, "step": 23 }, { "epoch": 0.09, "grad_norm": 0.1435546875, "learning_rate": 0.0001986049109426148, "loss": 0.0804, "step": 24 }, { "epoch": 0.09, "grad_norm": 0.5859375, "learning_rate": 0.00019839904713290184, "loss": 0.1984, "step": 25 }, { "epoch": 0.1, "grad_norm": 0.208984375, "learning_rate": 0.00019817914310111046, "loss": 0.0655, "step": 26 }, { "epoch": 0.1, "grad_norm": 0.1396484375, "learning_rate": 0.00019794523022459166, "loss": 0.1026, "step": 27 }, { "epoch": 0.1, "grad_norm": 0.42578125, "learning_rate": 0.00019769734187957038, "loss": 0.1749, "step": 28 }, { "epoch": 0.11, "grad_norm": 0.33203125, "learning_rate": 0.00019743551343638324, "loss": 0.191, "step": 29 }, { "epoch": 0.11, "grad_norm": 0.2216796875, "learning_rate": 0.00019715978225443148, "loss": 0.1551, "step": 30 }, { "epoch": 0.11, "grad_norm": 0.05126953125, "learning_rate": 0.0001968701876768505, "loss": 0.1128, "step": 31 }, { 
"epoch": 0.12, "grad_norm": 0.08447265625, "learning_rate": 0.00019656677102489588, "loss": 0.1195, "step": 32 }, { "epoch": 0.12, "grad_norm": 0.08544921875, "learning_rate": 0.00019624957559204761, "loss": 0.1093, "step": 33 }, { "epoch": 0.12, "grad_norm": 0.08642578125, "learning_rate": 0.0001959186466378326, "loss": 0.1163, "step": 34 }, { "epoch": 0.13, "grad_norm": 0.13671875, "learning_rate": 0.00019557403138136672, "loss": 0.1314, "step": 35 }, { "epoch": 0.13, "grad_norm": 0.0615234375, "learning_rate": 0.00019521577899461731, "loss": 0.1091, "step": 36 }, { "epoch": 0.14, "grad_norm": 0.11865234375, "learning_rate": 0.000194843940595387, "loss": 0.1141, "step": 37 }, { "epoch": 0.14, "grad_norm": 0.07080078125, "learning_rate": 0.0001944585692400199, "loss": 0.1056, "step": 38 }, { "epoch": 0.14, "grad_norm": 0.1533203125, "learning_rate": 0.00019405971991583108, "loss": 0.1257, "step": 39 }, { "epoch": 0.15, "grad_norm": 0.05712890625, "learning_rate": 0.00019364744953326074, "loss": 0.1166, "step": 40 }, { "epoch": 0.15, "grad_norm": 0.16796875, "learning_rate": 0.00019322181691775386, "loss": 0.1177, "step": 41 }, { "epoch": 0.15, "grad_norm": 0.10791015625, "learning_rate": 0.00019278288280136647, "loss": 0.1162, "step": 42 }, { "epoch": 0.16, "grad_norm": 0.130859375, "learning_rate": 0.00019233070981410007, "loss": 0.1177, "step": 43 }, { "epoch": 0.16, "grad_norm": 0.0732421875, "learning_rate": 0.00019186536247496518, "loss": 0.1173, "step": 44 }, { "epoch": 0.16, "grad_norm": 0.0947265625, "learning_rate": 0.00019138690718277542, "loss": 0.1203, "step": 45 }, { "epoch": 0.17, "grad_norm": 0.130859375, "learning_rate": 0.0001908954122066731, "loss": 0.1223, "step": 46 }, { "epoch": 0.17, "grad_norm": 0.12890625, "learning_rate": 0.00019039094767638832, "loss": 0.1167, "step": 47 }, { "epoch": 0.18, "grad_norm": 0.0712890625, "learning_rate": 0.00018987358557223232, "loss": 0.1071, "step": 48 }, { "epoch": 0.18, "grad_norm": 0.1083984375, "learning_rate": 0.00018934339971482674, "loss": 0.102, "step": 49 }, { "epoch": 0.18, "grad_norm": 0.1025390625, "learning_rate": 0.00018880046575457074, "loss": 0.1141, "step": 50 }, { "epoch": 0.19, "grad_norm": 0.12255859375, "learning_rate": 0.0001882448611608463, "loss": 0.1055, "step": 51 }, { "epoch": 0.19, "grad_norm": 0.1787109375, "learning_rate": 0.00018767666521096466, "loss": 0.1366, "step": 52 }, { "epoch": 0.19, "grad_norm": 0.1806640625, "learning_rate": 0.00018709595897885439, "loss": 0.1267, "step": 53 }, { "epoch": 0.2, "grad_norm": 0.16796875, "learning_rate": 0.00018650282532349332, "loss": 0.1128, "step": 54 }, { "epoch": 0.2, "grad_norm": 0.06005859375, "learning_rate": 0.00018589734887708556, "loss": 0.1116, "step": 55 }, { "epoch": 0.21, "grad_norm": 0.04541015625, "learning_rate": 0.00018527961603298572, "loss": 0.097, "step": 56 }, { "epoch": 0.21, "grad_norm": 0.1728515625, "learning_rate": 0.00018464971493337167, "loss": 0.167, "step": 57 }, { "epoch": 0.21, "grad_norm": 0.138671875, "learning_rate": 0.00018400773545666787, "loss": 0.1611, "step": 58 }, { "epoch": 0.22, "grad_norm": 0.037353515625, "learning_rate": 0.00018335376920472097, "loss": 0.1157, "step": 59 }, { "epoch": 0.22, "grad_norm": 0.08544921875, "learning_rate": 0.0001826879094897294, "loss": 0.1383, "step": 60 }, { "epoch": 0.22, "grad_norm": 0.07373046875, "learning_rate": 0.00018201025132092889, "loss": 0.1336, "step": 61 }, { "epoch": 0.23, "grad_norm": 0.07568359375, "learning_rate": 0.00018132089139103613, "loss": 0.1026, "step": 62 }, 
{ "epoch": 0.23, "grad_norm": 0.103515625, "learning_rate": 0.00018061992806245184, "loss": 0.1365, "step": 63 }, { "epoch": 0.23, "grad_norm": 0.043701171875, "learning_rate": 0.00017990746135322592, "loss": 0.1014, "step": 64 }, { "epoch": 0.24, "grad_norm": 0.04052734375, "learning_rate": 0.00017918359292278611, "loss": 0.1093, "step": 65 }, { "epoch": 0.24, "grad_norm": 0.12109375, "learning_rate": 0.00017844842605743258, "loss": 0.1131, "step": 66 }, { "epoch": 0.25, "grad_norm": 0.0908203125, "learning_rate": 0.00017770206565560033, "loss": 0.1105, "step": 67 }, { "epoch": 0.25, "grad_norm": 0.06884765625, "learning_rate": 0.00017694461821289172, "loss": 0.0952, "step": 68 }, { "epoch": 0.25, "grad_norm": 0.0703125, "learning_rate": 0.00017617619180688085, "loss": 0.1139, "step": 69 }, { "epoch": 0.26, "grad_norm": 0.044189453125, "learning_rate": 0.00017539689608169238, "loss": 0.0941, "step": 70 }, { "epoch": 0.26, "grad_norm": 0.134765625, "learning_rate": 0.0001746068422323568, "loss": 0.1306, "step": 71 }, { "epoch": 0.26, "grad_norm": 0.12353515625, "learning_rate": 0.00017380614298894442, "loss": 0.1205, "step": 72 }, { "epoch": 0.27, "grad_norm": 0.11572265625, "learning_rate": 0.0001729949126004802, "loss": 0.0854, "step": 73 }, { "epoch": 0.27, "grad_norm": 0.06201171875, "learning_rate": 0.00017217326681864207, "loss": 0.0948, "step": 74 }, { "epoch": 0.27, "grad_norm": 0.20703125, "learning_rate": 0.00017134132288124465, "loss": 0.1466, "step": 75 }, { "epoch": 0.28, "grad_norm": 0.1044921875, "learning_rate": 0.00017049919949551102, "loss": 0.1167, "step": 76 }, { "epoch": 0.28, "grad_norm": 0.044921875, "learning_rate": 0.00016964701682113475, "loss": 0.1052, "step": 77 }, { "epoch": 0.29, "grad_norm": 0.1318359375, "learning_rate": 0.0001687848964531348, "loss": 0.1252, "step": 78 }, { "epoch": 0.29, "grad_norm": 0.0810546875, "learning_rate": 0.00016791296140450545, "loss": 0.1048, "step": 79 }, { "epoch": 0.29, "grad_norm": 0.095703125, "learning_rate": 0.00016703133608866414, "loss": 0.1167, "step": 80 }, { "epoch": 0.3, "grad_norm": 0.09326171875, "learning_rate": 0.00016614014630169917, "loss": 0.1167, "step": 81 }, { "epoch": 0.3, "grad_norm": 0.05859375, "learning_rate": 0.00016523951920442034, "loss": 0.1091, "step": 82 }, { "epoch": 0.3, "grad_norm": 0.07666015625, "learning_rate": 0.00016432958330421497, "loss": 0.1055, "step": 83 }, { "epoch": 0.31, "grad_norm": 0.09765625, "learning_rate": 0.00016341046843671144, "loss": 0.1157, "step": 84 }, { "epoch": 0.31, "grad_norm": 0.045654296875, "learning_rate": 0.0001624823057472534, "loss": 0.109, "step": 85 }, { "epoch": 0.32, "grad_norm": 0.09130859375, "learning_rate": 0.00016154522767218725, "loss": 0.0934, "step": 86 }, { "epoch": 0.32, "grad_norm": 0.1103515625, "learning_rate": 0.000160599367919965, "loss": 0.113, "step": 87 }, { "epoch": 0.32, "grad_norm": 0.09619140625, "learning_rate": 0.0001596448614520661, "loss": 0.1089, "step": 88 }, { "epoch": 0.33, "grad_norm": 0.05419921875, "learning_rate": 0.0001586818444637402, "loss": 0.1066, "step": 89 }, { "epoch": 0.33, "grad_norm": 0.09130859375, "learning_rate": 0.0001577104543645738, "loss": 0.1249, "step": 90 }, { "epoch": 0.33, "grad_norm": 0.09375, "learning_rate": 0.00015673082975888386, "loss": 0.0955, "step": 91 }, { "epoch": 0.34, "grad_norm": 0.0634765625, "learning_rate": 0.0001557431104259408, "loss": 0.0873, "step": 92 }, { "epoch": 0.34, "grad_norm": 0.12451171875, "learning_rate": 0.0001547474373000238, "loss": 0.1362, "step": 93 }, { 
"epoch": 0.34, "grad_norm": 0.053466796875, "learning_rate": 0.0001537439524503116, "loss": 0.1005, "step": 94 }, { "epoch": 0.35, "grad_norm": 0.0810546875, "learning_rate": 0.00015273279906061082, "loss": 0.1119, "step": 95 }, { "epoch": 0.35, "grad_norm": 0.068359375, "learning_rate": 0.00015171412140892575, "loss": 0.099, "step": 96 }, { "epoch": 0.36, "grad_norm": 0.0947265625, "learning_rate": 0.0001506880648468719, "loss": 0.0794, "step": 97 }, { "epoch": 0.36, "grad_norm": 0.07177734375, "learning_rate": 0.00014965477577893598, "loss": 0.0942, "step": 98 }, { "epoch": 0.36, "grad_norm": 0.05224609375, "learning_rate": 0.0001486144016415862, "loss": 0.0958, "step": 99 }, { "epoch": 0.37, "grad_norm": 0.140625, "learning_rate": 0.0001475670908822351, "loss": 0.099, "step": 100 }, { "epoch": 0.37, "grad_norm": 0.07470703125, "learning_rate": 0.00014651299293805774, "loss": 0.0978, "step": 101 }, { "epoch": 0.37, "grad_norm": 0.205078125, "learning_rate": 0.0001454522582146695, "loss": 0.1197, "step": 102 }, { "epoch": 0.38, "grad_norm": 0.15234375, "learning_rate": 0.0001443850380646649, "loss": 0.1078, "step": 103 }, { "epoch": 0.38, "grad_norm": 0.1572265625, "learning_rate": 0.0001433114847660217, "loss": 0.0835, "step": 104 }, { "epoch": 0.38, "grad_norm": 0.134765625, "learning_rate": 0.00014223175150037296, "loss": 0.0961, "step": 105 }, { "epoch": 0.39, "grad_norm": 0.09130859375, "learning_rate": 0.00014114599233114986, "loss": 0.1005, "step": 106 }, { "epoch": 0.39, "grad_norm": 0.109375, "learning_rate": 0.00014005436218159927, "loss": 0.1072, "step": 107 }, { "epoch": 0.4, "grad_norm": 0.1806640625, "learning_rate": 0.00013895701681267784, "loss": 0.0974, "step": 108 }, { "epoch": 0.4, "grad_norm": 0.15234375, "learning_rate": 0.00013785411280082746, "loss": 0.1143, "step": 109 }, { "epoch": 0.4, "grad_norm": 0.1630859375, "learning_rate": 0.00013674580751563356, "loss": 0.1296, "step": 110 }, { "epoch": 0.41, "grad_norm": 0.1689453125, "learning_rate": 0.00013563225909737076, "loss": 0.1022, "step": 111 }, { "epoch": 0.41, "grad_norm": 0.0927734375, "learning_rate": 0.00013451362643443832, "loss": 0.0633, "step": 112 }, { "epoch": 0.41, "grad_norm": 0.1748046875, "learning_rate": 0.0001333900691406889, "loss": 0.1276, "step": 113 }, { "epoch": 0.42, "grad_norm": 0.1591796875, "learning_rate": 0.0001322617475326538, "loss": 0.117, "step": 114 }, { "epoch": 0.42, "grad_norm": 0.07568359375, "learning_rate": 0.00013112882260666805, "loss": 0.0645, "step": 115 }, { "epoch": 0.42, "grad_norm": 0.1357421875, "learning_rate": 0.00012999145601589823, "loss": 0.0632, "step": 116 }, { "epoch": 0.43, "grad_norm": 0.1181640625, "learning_rate": 0.00012884981004727676, "loss": 0.0832, "step": 117 }, { "epoch": 0.43, "grad_norm": 0.12109375, "learning_rate": 0.00012770404759834594, "loss": 0.0769, "step": 118 }, { "epoch": 0.44, "grad_norm": 0.1259765625, "learning_rate": 0.00012655433215401438, "loss": 0.0413, "step": 119 }, { "epoch": 0.44, "grad_norm": 0.220703125, "learning_rate": 0.00012540082776323007, "loss": 0.0832, "step": 120 }, { "epoch": 0.44, "grad_norm": 0.0810546875, "learning_rate": 0.0001242436990155728, "loss": 0.0197, "step": 121 }, { "epoch": 0.45, "grad_norm": 0.2392578125, "learning_rate": 0.00012308311101776932, "loss": 0.0549, "step": 122 }, { "epoch": 0.45, "grad_norm": 0.38671875, "learning_rate": 0.00012191922937013489, "loss": 0.0665, "step": 123 }, { "epoch": 0.45, "grad_norm": 0.32421875, "learning_rate": 0.00012075222014294447, "loss": 0.1273, "step": 124 
}, { "epoch": 0.46, "grad_norm": 0.2353515625, "learning_rate": 0.00011958224985273648, "loss": 0.0877, "step": 125 }, { "epoch": 0.46, "grad_norm": 0.279296875, "learning_rate": 0.00011840948543855335, "loss": 0.1127, "step": 126 }, { "epoch": 0.47, "grad_norm": 0.1318359375, "learning_rate": 0.00011723409423812134, "loss": 0.0416, "step": 127 }, { "epoch": 0.47, "grad_norm": 0.2109375, "learning_rate": 0.00011605624396397398, "loss": 0.0571, "step": 128 }, { "epoch": 0.47, "grad_norm": 0.1806640625, "learning_rate": 0.00011487610267952142, "loss": 0.1001, "step": 129 }, { "epoch": 0.48, "grad_norm": 0.0791015625, "learning_rate": 0.00011369383877507034, "loss": 0.0259, "step": 130 }, { "epoch": 0.48, "grad_norm": 0.1953125, "learning_rate": 0.0001125096209437967, "loss": 0.0805, "step": 131 }, { "epoch": 0.48, "grad_norm": 0.07275390625, "learning_rate": 0.00011132361815767554, "loss": 0.0239, "step": 132 }, { "epoch": 0.49, "grad_norm": 0.279296875, "learning_rate": 0.00011013599964337107, "loss": 0.0768, "step": 133 }, { "epoch": 0.49, "grad_norm": 0.048095703125, "learning_rate": 0.00010894693485809016, "loss": 0.0132, "step": 134 }, { "epoch": 0.49, "grad_norm": 0.2314453125, "learning_rate": 0.00010775659346540303, "loss": 0.0746, "step": 135 }, { "epoch": 0.5, "grad_norm": 0.11279296875, "learning_rate": 0.00010656514531103483, "loss": 0.0221, "step": 136 }, { "epoch": 0.5, "grad_norm": 0.1220703125, "learning_rate": 0.00010537276039863049, "loss": 0.0503, "step": 137 }, { "epoch": 0.51, "grad_norm": 0.10546875, "learning_rate": 0.00010417960886549798, "loss": 0.0546, "step": 138 }, { "epoch": 0.51, "grad_norm": 0.039306640625, "learning_rate": 0.00010298586095833151, "loss": 0.0095, "step": 139 }, { "epoch": 0.51, "grad_norm": 0.251953125, "learning_rate": 0.00010179168700892001, "loss": 0.1242, "step": 140 }, { "epoch": 0.52, "grad_norm": 0.2890625, "learning_rate": 0.00010059725740984284, "loss": 0.1958, "step": 141 }, { "epoch": 0.52, "grad_norm": 0.1298828125, "learning_rate": 9.940274259015721e-05, "loss": 0.0345, "step": 142 }, { "epoch": 0.52, "grad_norm": 0.12109375, "learning_rate": 9.820831299108003e-05, "loss": 0.0371, "step": 143 }, { "epoch": 0.53, "grad_norm": 0.26171875, "learning_rate": 9.701413904166852e-05, "loss": 0.0501, "step": 144 }, { "epoch": 0.53, "grad_norm": 0.318359375, "learning_rate": 9.582039113450208e-05, "loss": 0.0902, "step": 145 }, { "epoch": 0.53, "grad_norm": 0.1826171875, "learning_rate": 9.462723960136952e-05, "loss": 0.0528, "step": 146 }, { "epoch": 0.54, "grad_norm": 0.15625, "learning_rate": 9.34348546889652e-05, "loss": 0.0657, "step": 147 }, { "epoch": 0.54, "grad_norm": 0.1748046875, "learning_rate": 9.224340653459698e-05, "loss": 0.0547, "step": 148 }, { "epoch": 0.55, "grad_norm": 0.26171875, "learning_rate": 9.10530651419099e-05, "loss": 0.1411, "step": 149 }, { "epoch": 0.55, "grad_norm": 0.375, "learning_rate": 8.986400035662896e-05, "loss": 0.1156, "step": 150 }, { "epoch": 0.55, "grad_norm": 0.322265625, "learning_rate": 8.867638184232446e-05, "loss": 0.1539, "step": 151 }, { "epoch": 0.56, "grad_norm": 0.17578125, "learning_rate": 8.749037905620334e-05, "loss": 0.1194, "step": 152 }, { "epoch": 0.56, "grad_norm": 0.15234375, "learning_rate": 8.630616122492967e-05, "loss": 0.0595, "step": 153 }, { "epoch": 0.56, "grad_norm": 0.10400390625, "learning_rate": 8.512389732047859e-05, "loss": 0.0666, "step": 154 }, { "epoch": 0.57, "grad_norm": 0.115234375, "learning_rate": 8.394375603602603e-05, "loss": 0.0883, "step": 155 }, { 
"epoch": 0.57, "grad_norm": 0.103515625, "learning_rate": 8.276590576187869e-05, "loss": 0.0889, "step": 156 }, { "epoch": 0.58, "grad_norm": 0.10107421875, "learning_rate": 8.159051456144668e-05, "loss": 0.0751, "step": 157 }, { "epoch": 0.58, "grad_norm": 0.0927734375, "learning_rate": 8.041775014726353e-05, "loss": 0.0864, "step": 158 }, { "epoch": 0.58, "grad_norm": 0.10107421875, "learning_rate": 7.924777985705556e-05, "loss": 0.0768, "step": 159 }, { "epoch": 0.59, "grad_norm": 0.09814453125, "learning_rate": 7.808077062986514e-05, "loss": 0.0617, "step": 160 }, { "epoch": 0.59, "grad_norm": 0.1474609375, "learning_rate": 7.69168889822307e-05, "loss": 0.0586, "step": 161 }, { "epoch": 0.59, "grad_norm": 0.15625, "learning_rate": 7.575630098442723e-05, "loss": 0.1025, "step": 162 }, { "epoch": 0.6, "grad_norm": 0.06591796875, "learning_rate": 7.459917223676993e-05, "loss": 0.0485, "step": 163 }, { "epoch": 0.6, "grad_norm": 0.19140625, "learning_rate": 7.344566784598564e-05, "loss": 0.1085, "step": 164 }, { "epoch": 0.6, "grad_norm": 0.10205078125, "learning_rate": 7.229595240165405e-05, "loss": 0.0757, "step": 165 }, { "epoch": 0.61, "grad_norm": 0.08544921875, "learning_rate": 7.115018995272325e-05, "loss": 0.0769, "step": 166 }, { "epoch": 0.61, "grad_norm": 0.1884765625, "learning_rate": 7.000854398410182e-05, "loss": 0.1229, "step": 167 }, { "epoch": 0.62, "grad_norm": 0.1025390625, "learning_rate": 6.887117739333196e-05, "loss": 0.0633, "step": 168 }, { "epoch": 0.62, "grad_norm": 0.07861328125, "learning_rate": 6.773825246734622e-05, "loss": 0.0513, "step": 169 }, { "epoch": 0.62, "grad_norm": 0.07275390625, "learning_rate": 6.660993085931113e-05, "loss": 0.0394, "step": 170 }, { "epoch": 0.63, "grad_norm": 0.125, "learning_rate": 6.54863735655617e-05, "loss": 0.0949, "step": 171 }, { "epoch": 0.63, "grad_norm": 0.11279296875, "learning_rate": 6.436774090262925e-05, "loss": 0.0946, "step": 172 }, { "epoch": 0.63, "grad_norm": 0.07861328125, "learning_rate": 6.325419248436649e-05, "loss": 0.0683, "step": 173 }, { "epoch": 0.64, "grad_norm": 0.08349609375, "learning_rate": 6.214588719917256e-05, "loss": 0.0412, "step": 174 }, { "epoch": 0.64, "grad_norm": 0.2119140625, "learning_rate": 6.104298318732218e-05, "loss": 0.1105, "step": 175 }, { "epoch": 0.64, "grad_norm": 0.09423828125, "learning_rate": 5.994563781840079e-05, "loss": 0.0435, "step": 176 }, { "epoch": 0.65, "grad_norm": 0.0595703125, "learning_rate": 5.885400766885015e-05, "loss": 0.0325, "step": 177 }, { "epoch": 0.65, "grad_norm": 0.12890625, "learning_rate": 5.776824849962705e-05, "loss": 0.069, "step": 178 }, { "epoch": 0.66, "grad_norm": 0.050537109375, "learning_rate": 5.668851523397829e-05, "loss": 0.0448, "step": 179 }, { "epoch": 0.66, "grad_norm": 0.1142578125, "learning_rate": 5.561496193533515e-05, "loss": 0.0747, "step": 180 }, { "epoch": 0.66, "grad_norm": 0.140625, "learning_rate": 5.4547741785330505e-05, "loss": 0.0832, "step": 181 }, { "epoch": 0.67, "grad_norm": 0.11865234375, "learning_rate": 5.3487007061942276e-05, "loss": 0.0725, "step": 182 }, { "epoch": 0.67, "grad_norm": 0.0888671875, "learning_rate": 5.243290911776497e-05, "loss": 0.0729, "step": 183 }, { "epoch": 0.67, "grad_norm": 0.126953125, "learning_rate": 5.138559835841381e-05, "loss": 0.0649, "step": 184 }, { "epoch": 0.68, "grad_norm": 0.10107421875, "learning_rate": 5.0345224221064025e-05, "loss": 0.0435, "step": 185 }, { "epoch": 0.68, "grad_norm": 0.06298828125, "learning_rate": 4.931193515312813e-05, "loss": 0.0313, "step": 186 }, 
{ "epoch": 0.68, "grad_norm": 0.1552734375, "learning_rate": 4.828587859107425e-05, "loss": 0.0618, "step": 187 }, { "epoch": 0.69, "grad_norm": 0.09033203125, "learning_rate": 4.726720093938921e-05, "loss": 0.0298, "step": 188 }, { "epoch": 0.69, "grad_norm": 0.0830078125, "learning_rate": 4.625604754968839e-05, "loss": 0.0169, "step": 189 }, { "epoch": 0.7, "grad_norm": 0.23046875, "learning_rate": 4.525256269997621e-05, "loss": 0.1029, "step": 190 }, { "epoch": 0.7, "grad_norm": 0.11962890625, "learning_rate": 4.4256889574059236e-05, "loss": 0.0387, "step": 191 }, { "epoch": 0.7, "grad_norm": 0.1103515625, "learning_rate": 4.3269170241116155e-05, "loss": 0.0474, "step": 192 }, { "epoch": 0.71, "grad_norm": 0.10595703125, "learning_rate": 4.2289545635426206e-05, "loss": 0.0285, "step": 193 }, { "epoch": 0.71, "grad_norm": 0.12451171875, "learning_rate": 4.1318155536259795e-05, "loss": 0.0821, "step": 194 }, { "epoch": 0.71, "grad_norm": 0.216796875, "learning_rate": 4.035513854793389e-05, "loss": 0.0698, "step": 195 }, { "epoch": 0.72, "grad_norm": 0.146484375, "learning_rate": 3.940063208003503e-05, "loss": 0.0526, "step": 196 }, { "epoch": 0.72, "grad_norm": 0.1396484375, "learning_rate": 3.845477232781278e-05, "loss": 0.0513, "step": 197 }, { "epoch": 0.73, "grad_norm": 0.126953125, "learning_rate": 3.751769425274661e-05, "loss": 0.0394, "step": 198 }, { "epoch": 0.73, "grad_norm": 0.10546875, "learning_rate": 3.658953156328857e-05, "loss": 0.032, "step": 199 }, { "epoch": 0.73, "grad_norm": 0.171875, "learning_rate": 3.567041669578507e-05, "loss": 0.0509, "step": 200 }, { "epoch": 0.74, "grad_norm": 0.083984375, "learning_rate": 3.476048079557967e-05, "loss": 0.0255, "step": 201 }, { "epoch": 0.74, "grad_norm": 0.326171875, "learning_rate": 3.3859853698300855e-05, "loss": 0.1141, "step": 202 }, { "epoch": 0.74, "grad_norm": 0.1728515625, "learning_rate": 3.2968663911335894e-05, "loss": 0.0673, "step": 203 }, { "epoch": 0.75, "grad_norm": 0.0625, "learning_rate": 3.208703859549457e-05, "loss": 0.0184, "step": 204 }, { "epoch": 0.75, "grad_norm": 0.119140625, "learning_rate": 3.12151035468652e-05, "loss": 0.0542, "step": 205 }, { "epoch": 0.75, "grad_norm": 0.1005859375, "learning_rate": 3.0352983178865256e-05, "loss": 0.0334, "step": 206 }, { "epoch": 0.76, "grad_norm": 0.265625, "learning_rate": 2.9500800504489022e-05, "loss": 0.0897, "step": 207 }, { "epoch": 0.76, "grad_norm": 0.197265625, "learning_rate": 2.8658677118755382e-05, "loss": 0.1091, "step": 208 }, { "epoch": 0.77, "grad_norm": 0.1875, "learning_rate": 2.7826733181357932e-05, "loss": 0.1194, "step": 209 }, { "epoch": 0.77, "grad_norm": 0.21875, "learning_rate": 2.7005087399519835e-05, "loss": 0.0697, "step": 210 }, { "epoch": 0.77, "grad_norm": 0.1806640625, "learning_rate": 2.6193857011055622e-05, "loss": 0.1141, "step": 211 }, { "epoch": 0.78, "grad_norm": 0.1494140625, "learning_rate": 2.5393157767643228e-05, "loss": 0.07, "step": 212 }, { "epoch": 0.78, "grad_norm": 0.1376953125, "learning_rate": 2.4603103918307625e-05, "loss": 0.0618, "step": 213 }, { "epoch": 0.78, "grad_norm": 0.0498046875, "learning_rate": 2.3823808193119178e-05, "loss": 0.0224, "step": 214 }, { "epoch": 0.79, "grad_norm": 0.1787109375, "learning_rate": 2.305538178710831e-05, "loss": 0.075, "step": 215 }, { "epoch": 0.79, "grad_norm": 0.11669921875, "learning_rate": 2.2297934344399695e-05, "loss": 0.0293, "step": 216 }, { "epoch": 0.79, "grad_norm": 0.064453125, "learning_rate": 2.155157394256745e-05, "loss": 0.0287, "step": 217 }, { "epoch": 
0.8, "grad_norm": 0.1298828125, "learning_rate": 2.0816407077213896e-05, "loss": 0.1017, "step": 218 }, { "epoch": 0.8, "grad_norm": 0.06201171875, "learning_rate": 2.0092538646774072e-05, "loss": 0.0204, "step": 219 }, { "epoch": 0.81, "grad_norm": 0.057373046875, "learning_rate": 1.938007193754816e-05, "loss": 0.0253, "step": 220 }, { "epoch": 0.81, "grad_norm": 0.0908203125, "learning_rate": 1.8679108608963903e-05, "loss": 0.047, "step": 221 }, { "epoch": 0.81, "grad_norm": 0.10986328125, "learning_rate": 1.7989748679071138e-05, "loss": 0.0752, "step": 222 }, { "epoch": 0.82, "grad_norm": 0.11279296875, "learning_rate": 1.7312090510270627e-05, "loss": 0.0387, "step": 223 }, { "epoch": 0.82, "grad_norm": 0.08154296875, "learning_rate": 1.6646230795279026e-05, "loss": 0.0577, "step": 224 }, { "epoch": 0.82, "grad_norm": 0.115234375, "learning_rate": 1.5992264543332124e-05, "loss": 0.0637, "step": 225 }, { "epoch": 0.83, "grad_norm": 0.1376953125, "learning_rate": 1.5350285066628343e-05, "loss": 0.0656, "step": 226 }, { "epoch": 0.83, "grad_norm": 0.123046875, "learning_rate": 1.4720383967014306e-05, "loss": 0.0257, "step": 227 }, { "epoch": 0.84, "grad_norm": 0.12109375, "learning_rate": 1.4102651122914434e-05, "loss": 0.099, "step": 228 }, { "epoch": 0.84, "grad_norm": 0.203125, "learning_rate": 1.3497174676506674e-05, "loss": 0.0496, "step": 229 }, { "epoch": 0.84, "grad_norm": 0.14453125, "learning_rate": 1.2904041021145596e-05, "loss": 0.0549, "step": 230 }, { "epoch": 0.85, "grad_norm": 0.1572265625, "learning_rate": 1.2323334789035367e-05, "loss": 0.07, "step": 231 }, { "epoch": 0.85, "grad_norm": 0.2216796875, "learning_rate": 1.17551388391537e-05, "loss": 0.0498, "step": 232 }, { "epoch": 0.85, "grad_norm": 0.12109375, "learning_rate": 1.1199534245429255e-05, "loss": 0.0514, "step": 233 }, { "epoch": 0.86, "grad_norm": 0.0908203125, "learning_rate": 1.0656600285173258e-05, "loss": 0.0231, "step": 234 }, { "epoch": 0.86, "grad_norm": 0.04443359375, "learning_rate": 1.0126414427767717e-05, "loss": 0.0215, "step": 235 }, { "epoch": 0.86, "grad_norm": 0.0927734375, "learning_rate": 9.609052323611666e-06, "loss": 0.0237, "step": 236 }, { "epoch": 0.87, "grad_norm": 0.049072265625, "learning_rate": 9.104587793326901e-06, "loss": 0.0196, "step": 237 }, { "epoch": 0.87, "grad_norm": 0.2255859375, "learning_rate": 8.613092817224611e-06, "loss": 0.0842, "step": 238 }, { "epoch": 0.88, "grad_norm": 0.1513671875, "learning_rate": 8.134637525034839e-06, "loss": 0.0692, "step": 239 }, { "epoch": 0.88, "grad_norm": 0.1259765625, "learning_rate": 7.669290185899946e-06, "loss": 0.0384, "step": 240 }, { "epoch": 0.88, "grad_norm": 0.07568359375, "learning_rate": 7.217117198633561e-06, "loss": 0.0359, "step": 241 }, { "epoch": 0.89, "grad_norm": 0.06591796875, "learning_rate": 6.778183082246148e-06, "loss": 0.0195, "step": 242 }, { "epoch": 0.89, "grad_norm": 0.11962890625, "learning_rate": 6.3525504667392595e-06, "loss": 0.0531, "step": 243 }, { "epoch": 0.89, "grad_norm": 0.2099609375, "learning_rate": 5.940280084168947e-06, "loss": 0.0815, "step": 244 }, { "epoch": 0.9, "grad_norm": 0.06884765625, "learning_rate": 5.541430759980138e-06, "loss": 0.0291, "step": 245 }, { "epoch": 0.9, "grad_norm": 0.11474609375, "learning_rate": 5.1560594046130115e-06, "loss": 0.0621, "step": 246 }, { "epoch": 0.9, "grad_norm": 0.1328125, "learning_rate": 4.784221005382705e-06, "loss": 0.0761, "step": 247 }, { "epoch": 0.91, "grad_norm": 0.08642578125, "learning_rate": 4.425968618633292e-06, "loss": 0.0197, "step": 
248 }, { "epoch": 0.91, "grad_norm": 0.0791015625, "learning_rate": 4.081353362167406e-06, "loss": 0.0318, "step": 249 }, { "epoch": 0.92, "grad_norm": 0.1259765625, "learning_rate": 3.750424407952402e-06, "loss": 0.0758, "step": 250 }, { "epoch": 0.92, "grad_norm": 0.045166015625, "learning_rate": 3.4332289751041526e-06, "loss": 0.0201, "step": 251 }, { "epoch": 0.92, "grad_norm": 0.212890625, "learning_rate": 3.129812323149528e-06, "loss": 0.0379, "step": 252 }, { "epoch": 0.93, "grad_norm": 0.0908203125, "learning_rate": 2.8402177455685296e-06, "loss": 0.0331, "step": 253 }, { "epoch": 0.93, "grad_norm": 0.09814453125, "learning_rate": 2.564486563616786e-06, "loss": 0.0441, "step": 254 }, { "epoch": 0.93, "grad_norm": 0.078125, "learning_rate": 2.3026581204296347e-06, "loss": 0.025, "step": 255 }, { "epoch": 0.94, "grad_norm": 0.2236328125, "learning_rate": 2.0547697754083605e-06, "loss": 0.0885, "step": 256 }, { "epoch": 0.94, "grad_norm": 0.0791015625, "learning_rate": 1.8208568988895558e-06, "loss": 0.0393, "step": 257 }, { "epoch": 0.95, "grad_norm": 0.0546875, "learning_rate": 1.6009528670981711e-06, "loss": 0.0358, "step": 258 }, { "epoch": 0.95, "grad_norm": 0.1884765625, "learning_rate": 1.3950890573852126e-06, "loss": 0.0784, "step": 259 }, { "epoch": 0.95, "grad_norm": 0.1103515625, "learning_rate": 1.2032948437506576e-06, "loss": 0.0441, "step": 260 }, { "epoch": 0.96, "grad_norm": 0.1220703125, "learning_rate": 1.0255975926521166e-06, "loss": 0.0274, "step": 261 }, { "epoch": 0.96, "grad_norm": 0.080078125, "learning_rate": 8.620226591000479e-07, "loss": 0.0153, "step": 262 }, { "epoch": 0.96, "grad_norm": 0.130859375, "learning_rate": 7.125933830398945e-07, "loss": 0.0462, "step": 263 }, { "epoch": 0.97, "grad_norm": 0.05810546875, "learning_rate": 5.773310860218373e-07, "loss": 0.0356, "step": 264 }, { "epoch": 0.97, "grad_norm": 0.171875, "learning_rate": 4.562550681584954e-07, "loss": 0.0545, "step": 265 }, { "epoch": 0.97, "grad_norm": 0.09033203125, "learning_rate": 3.4938260537098476e-07, "loss": 0.0547, "step": 266 }, { "epoch": 0.98, "grad_norm": 0.0947265625, "learning_rate": 2.567289469239786e-07, "loss": 0.0342, "step": 267 }, { "epoch": 0.98, "grad_norm": 0.10400390625, "learning_rate": 1.7830731324977036e-07, "loss": 0.0276, "step": 268 }, { "epoch": 0.99, "grad_norm": 0.12158203125, "learning_rate": 1.1412889406192673e-07, "loss": 0.0597, "step": 269 }, { "epoch": 0.99, "grad_norm": 0.050048828125, "learning_rate": 6.420284675865418e-08, "loss": 0.0197, "step": 270 }, { "epoch": 0.99, "grad_norm": 0.1611328125, "learning_rate": 2.853629511617717e-08, "loss": 0.0884, "step": 271 }, { "epoch": 1.0, "grad_norm": 0.16015625, "learning_rate": 7.134328272240254e-09, "loss": 0.1165, "step": 272 }, { "epoch": 1.0, "grad_norm": 0.04638671875, "learning_rate": 0.0, "loss": 0.0183, "step": 273 }, { "epoch": 1.0, "eval_loss": 0.03699357807636261, "eval_runtime": 12.8888, "eval_samples_per_second": 8.922, "eval_steps_per_second": 1.164, "step": 273 } ], "logging_steps": 1, "max_steps": 273, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "total_flos": 2.1059186691407872e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }