{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.07335075413744098,
"eval_steps": 200,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"eval_bertscore": 0.7401605248451233,
"eval_loss": 1.9530484676361084,
"eval_rouge1": 0.6562857460474375,
"eval_rouge2": 0.3640670727106235,
"eval_rougeL": 0.5655212336424695,
"eval_rougeLsum": 0.6414840198810386,
"eval_runtime": 21.7196,
"eval_samples_per_second": 1.381,
"eval_steps_per_second": 0.691,
"step": 200
},
{
"epoch": 0.02,
"grad_norm": 0.25105270743370056,
"learning_rate": 0.00019771674842969145,
"loss": 1.7353,
"step": 250
},
{
"epoch": 0.04,
"eval_bertscore": 0.7432050108909607,
"eval_loss": 1.9583823680877686,
"eval_rouge1": 0.6554226269617707,
"eval_rouge2": 0.36661086995296877,
"eval_rougeL": 0.5637448790342183,
"eval_rougeLsum": 0.6419796784912521,
"eval_runtime": 21.9623,
"eval_samples_per_second": 1.366,
"eval_steps_per_second": 0.683,
"step": 400
},
{
"epoch": 0.05,
"grad_norm": 0.26550447940826416,
"learning_rate": 0.00019542432717436156,
"loss": 1.7786,
"step": 500
},
{
"epoch": 0.06,
"eval_bertscore": 0.7469045519828796,
"eval_loss": 1.9245686531066895,
"eval_rouge1": 0.6662431635890791,
"eval_rouge2": 0.3735263724826765,
"eval_rougeL": 0.5755071616151013,
"eval_rougeLsum": 0.6538383087686117,
"eval_runtime": 21.5302,
"eval_samples_per_second": 1.393,
"eval_steps_per_second": 0.697,
"step": 600
},
{
"epoch": 0.07,
"grad_norm": 0.1538015753030777,
"learning_rate": 0.0001931319059190317,
"loss": 1.8851,
"step": 750
},
{
"epoch": 0.07,
"eval_bertscore": 0.7442477941513062,
"eval_loss": 1.9187489748001099,
"eval_rouge1": 0.6606221897489035,
"eval_rouge2": 0.368654563659435,
"eval_rougeL": 0.5731546210408094,
"eval_rougeLsum": 0.6470590823125606,
"eval_runtime": 21.9831,
"eval_samples_per_second": 1.365,
"eval_steps_per_second": 0.682,
"step": 800
}
],
"logging_steps": 250,
"max_steps": 21812,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 800,
"total_flos": 5.3929253535744e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}