File size: 3,272 Bytes
Revision 0a59dd8
{
  "output_dir": "/root/LLaMA-LoRA-Tuner/data/lora_models/historied-secondary-2023-07-16-14-52-41",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "evaluation_strategy": "no",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 1,
  "per_device_eval_batch_size": 8,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 1,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "learning_rate": 0.0003,
  "weight_decay": 0.0,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 3,
  "max_steps": -1,
  "lr_scheduler_type": "linear",
  "warmup_ratio": 0.0,
  "warmup_steps": 100,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/root/LLaMA-LoRA-Tuner/data/lora_models/historied-secondary-2023-07-16-14-52-41/runs/Jul16_15-01-32_c6ac1eef89ee",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 10,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 250,
  "save_total_limit": 2,
  "save_safetensors": false,
  "save_on_each_node": false,
  "no_cuda": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": null,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": false,
  "fp16": true,
  "fp16_opt_level": "O1",
  "half_precision_backend": "cuda_amp",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": -1,
  "xpu_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 0,
  "past_index": -1,
  "run_name": "/root/LLaMA-LoRA-Tuner/data/lora_models/historied-secondary-2023-07-16-14-52-41",
  "disable_tqdm": false,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": null,
  "greater_is_better": null,
  "ignore_data_skip": false,
  "sharded_ddp": [],
  "fsdp": [],
  "fsdp_min_num_params": 0,
  "fsdp_config": {
    "fsdp_min_num_params": 0,
    "xla": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_transformer_layer_cls_to_wrap": null,
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "dataloader_pin_memory": true,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": "<HUB_TOKEN>",
  "hub_private_repo": false,
  "gradient_checkpointing": false,
  "include_inputs_for_metrics": false,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null
}
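
These are the `transformers.TrainingArguments` that LLaMA-LoRA-Tuner serialized next to the LoRA checkpoint. As a minimal sketch of loading them back (assuming a transformers release contemporary with this file, roughly 4.28-4.30, and a local copy saved as `training_args.json`; both the filename and the field filtering are assumptions, since later transformers releases renamed or dropped some of these keys, e.g. `xpu_backend` and `sharded_ddp`):

```python
import dataclasses
import json

from transformers import TrainingArguments

# Path is an assumption; point it at the training_args.json shown above.
with open("training_args.json") as f:
    args_dict = json.load(f)

# Keep only keys the installed transformers version still recognizes,
# so the sketch degrades gracefully across releases instead of raising
# a TypeError on an unknown keyword.
known_fields = {f.name for f in dataclasses.fields(TrainingArguments)}
training_args = TrainingArguments(
    **{k: v for k, v in args_dict.items() if k in known_fields}
)

print(training_args.learning_rate)                 # 0.0003
print(training_args.per_device_train_batch_size)   # 1
print(training_args.fp16)                          # True
```

For a LoRA fine-tune, the values worth noticing in this dump are the per-device batch size of 1 with no gradient accumulation, fp16 mixed precision, a linear scheduler with 100 warmup steps over 3 epochs, and checkpoints saved every 250 steps with at most 2 retained.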