{ "best_metric": 0.5091743119266054, "best_model_checkpoint": "electra-distilled-sst\\run-2\\checkpoint-527", "epoch": 6.0, "eval_steps": 500, "global_step": 3162, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 3.510751485824585, "learning_rate": 0.0007235764985208607, "loss": 4.9041, "step": 527 }, { "epoch": 1.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.841418266296387, "eval_runtime": 1.4537, "eval_samples_per_second": 599.861, "eval_steps_per_second": 4.815, "step": 527 }, { "epoch": 2.0, "grad_norm": 0.5188323259353638, "learning_rate": 0.0005789160983992775, "loss": 4.9002, "step": 1054 }, { "epoch": 2.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.816441059112549, "eval_runtime": 1.4412, "eval_samples_per_second": 605.042, "eval_steps_per_second": 4.857, "step": 1054 }, { "epoch": 3.0, "grad_norm": 1.3247452974319458, "learning_rate": 0.00043425569827769417, "loss": 4.9, "step": 1581 }, { "epoch": 3.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.82835054397583, "eval_runtime": 1.4366, "eval_samples_per_second": 606.994, "eval_steps_per_second": 4.873, "step": 1581 }, { "epoch": 4.0, "grad_norm": 0.6336215734481812, "learning_rate": 0.0002895952981561108, "loss": 4.893, "step": 2108 }, { "epoch": 4.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.868070602416992, "eval_runtime": 1.4415, "eval_samples_per_second": 604.928, "eval_steps_per_second": 4.856, "step": 2108 }, { "epoch": 5.0, "grad_norm": 1.2643113136291504, "learning_rate": 0.0001449348980345275, "loss": 4.8991, "step": 2635 }, { "epoch": 5.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.832038879394531, "eval_runtime": 1.448, "eval_samples_per_second": 602.191, "eval_steps_per_second": 4.834, "step": 2635 }, { "epoch": 6.0, "grad_norm": 1.2564785480499268, "learning_rate": 2.744979129441809e-07, "loss": 4.8938, "step": 3162 }, { "epoch": 6.0, "eval_accuracy": 0.5091743119266054, "eval_loss": 4.896666526794434, "eval_runtime": 1.4438, "eval_samples_per_second": 603.96, "eval_steps_per_second": 4.848, "step": 3162 } ], "logging_steps": 500, "max_steps": 3162, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 500, "total_flos": 1123721737396596.0, "train_batch_size": 128, "trial_name": null, "trial_params": { "alpha": 0.2011698337552854, "learning_rate": 0.0008679624007294999, "num_train_epochs": 6, "temperature": 5 } }