{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9801324503311258,
"eval_steps": 10,
"global_step": 37,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 0.06000334769487381,
"learning_rate": 2e-05,
"loss": 0.6668,
"step": 1
},
{
"epoch": 0.03,
"eval_loss": 0.7461267113685608,
"eval_runtime": 323.0571,
"eval_samples_per_second": 0.696,
"eval_steps_per_second": 0.176,
"step": 1
},
{
"epoch": 0.05,
"grad_norm": 0.06872428953647614,
"learning_rate": 4e-05,
"loss": 0.7291,
"step": 2
},
{
"epoch": 0.08,
"grad_norm": 0.06414613127708435,
"learning_rate": 6e-05,
"loss": 0.6424,
"step": 3
},
{
"epoch": 0.11,
"grad_norm": 0.07566527277231216,
"learning_rate": 8e-05,
"loss": 0.7077,
"step": 4
},
{
"epoch": 0.13,
"grad_norm": 0.0865904837846756,
"learning_rate": 0.0001,
"loss": 0.6572,
"step": 5
},
{
"epoch": 0.16,
"grad_norm": 0.1051245629787445,
"learning_rate": 0.00012,
"loss": 0.7091,
"step": 6
},
{
"epoch": 0.19,
"grad_norm": 0.1256977915763855,
"learning_rate": 0.00014,
"loss": 0.6362,
"step": 7
},
{
"epoch": 0.21,
"grad_norm": 0.15082339942455292,
"learning_rate": 0.00016,
"loss": 0.6448,
"step": 8
},
{
"epoch": 0.24,
"grad_norm": 0.12878084182739258,
"learning_rate": 0.00018,
"loss": 0.5384,
"step": 9
},
{
"epoch": 0.26,
"grad_norm": 0.10468369722366333,
"learning_rate": 0.0002,
"loss": 0.5084,
"step": 10
},
{
"epoch": 0.26,
"eval_loss": 0.45858803391456604,
"eval_runtime": 327.4151,
"eval_samples_per_second": 0.687,
"eval_steps_per_second": 0.174,
"step": 10
},
{
"epoch": 0.29,
"grad_norm": 0.10410615801811218,
"learning_rate": 0.00019932383577419432,
"loss": 0.4551,
"step": 11
},
{
"epoch": 0.32,
"grad_norm": 0.08829599618911743,
"learning_rate": 0.00019730448705798239,
"loss": 0.3825,
"step": 12
},
{
"epoch": 0.34,
"grad_norm": 0.10538941621780396,
"learning_rate": 0.00019396926207859084,
"loss": 0.4109,
"step": 13
},
{
"epoch": 0.37,
"grad_norm": 0.10487955063581467,
"learning_rate": 0.00018936326403234125,
"loss": 0.3916,
"step": 14
},
{
"epoch": 0.4,
"grad_norm": 0.08832171559333801,
"learning_rate": 0.00018354878114129367,
"loss": 0.2869,
"step": 15
},
{
"epoch": 0.42,
"grad_norm": 0.07985535264015198,
"learning_rate": 0.0001766044443118978,
"loss": 0.3073,
"step": 16
},
{
"epoch": 0.45,
"grad_norm": 0.05276218056678772,
"learning_rate": 0.0001686241637868734,
"loss": 0.2549,
"step": 17
},
{
"epoch": 0.48,
"grad_norm": 0.07263408601284027,
"learning_rate": 0.00015971585917027862,
"loss": 0.2838,
"step": 18
},
{
"epoch": 0.5,
"grad_norm": 0.06012401357293129,
"learning_rate": 0.00015000000000000001,
"loss": 0.2672,
"step": 19
},
{
"epoch": 0.53,
"grad_norm": 0.05461546778678894,
"learning_rate": 0.0001396079766039157,
"loss": 0.241,
"step": 20
},
{
"epoch": 0.53,
"eval_loss": 0.24859268963336945,
"eval_runtime": 327.3187,
"eval_samples_per_second": 0.687,
"eval_steps_per_second": 0.174,
"step": 20
},
{
"epoch": 0.56,
"grad_norm": 0.057535674422979355,
"learning_rate": 0.00012868032327110904,
"loss": 0.2472,
"step": 21
},
{
"epoch": 0.58,
"grad_norm": 0.046681083738803864,
"learning_rate": 0.00011736481776669306,
"loss": 0.2441,
"step": 22
},
{
"epoch": 0.61,
"grad_norm": 0.03651268407702446,
"learning_rate": 0.00010581448289104758,
"loss": 0.2368,
"step": 23
},
{
"epoch": 0.64,
"grad_norm": 0.03193233534693718,
"learning_rate": 9.418551710895243e-05,
"loss": 0.2838,
"step": 24
},
{
"epoch": 0.66,
"grad_norm": 0.04706702381372452,
"learning_rate": 8.263518223330697e-05,
"loss": 0.2752,
"step": 25
},
{
"epoch": 0.69,
"grad_norm": 0.03650536388158798,
"learning_rate": 7.131967672889101e-05,
"loss": 0.2326,
"step": 26
},
{
"epoch": 0.72,
"grad_norm": 0.030436988919973373,
"learning_rate": 6.039202339608432e-05,
"loss": 0.2333,
"step": 27
},
{
"epoch": 0.74,
"grad_norm": 0.03042667545378208,
"learning_rate": 5.000000000000002e-05,
"loss": 0.2543,
"step": 28
},
{
"epoch": 0.77,
"grad_norm": 0.028066903352737427,
"learning_rate": 4.028414082972141e-05,
"loss": 0.2221,
"step": 29
},
{
"epoch": 0.79,
"grad_norm": 0.029378369450569153,
"learning_rate": 3.137583621312665e-05,
"loss": 0.2553,
"step": 30
},
{
"epoch": 0.79,
"eval_loss": 0.22415944933891296,
"eval_runtime": 329.7004,
"eval_samples_per_second": 0.682,
"eval_steps_per_second": 0.173,
"step": 30
},
{
"epoch": 0.82,
"grad_norm": 0.03442079573869705,
"learning_rate": 2.339555568810221e-05,
"loss": 0.2678,
"step": 31
},
{
"epoch": 0.85,
"grad_norm": 0.026801805943250656,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.2198,
"step": 32
},
{
"epoch": 0.87,
"grad_norm": 0.02642691694200039,
"learning_rate": 1.0636735967658784e-05,
"loss": 0.2596,
"step": 33
},
{
"epoch": 0.9,
"grad_norm": 0.026598341763019562,
"learning_rate": 6.030737921409169e-06,
"loss": 0.2378,
"step": 34
},
{
"epoch": 0.93,
"grad_norm": 0.02618224173784256,
"learning_rate": 2.6955129420176196e-06,
"loss": 0.2497,
"step": 35
},
{
"epoch": 0.95,
"grad_norm": 0.027151504531502724,
"learning_rate": 6.761642258056978e-07,
"loss": 0.2273,
"step": 36
},
{
"epoch": 0.98,
"grad_norm": 0.026682227849960327,
"learning_rate": 0.0,
"loss": 0.2324,
"step": 37
}
],
"logging_steps": 1,
"max_steps": 37,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 9.73086940767191e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}