Wenboz committed on
Commit
49256fd
1 Parent(s): 189cf2b

Model save

Files changed (4)
  1. README.md +63 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +57 -0
README.md ADDED
@@ -0,0 +1,63 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - trl
+ - dpo
+ - generated_from_trainer
+ base_model: microsoft/Phi-3-mini-4k-instruct
+ model-index:
+ - name: phi_3-offline-dpo-noise-0.0-42
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/causal/huggingface/runs/pnu5z3m9)
+ # phi_3-offline-dpo-noise-0.0-42
+
+ This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on the None dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.7.1
+ - Transformers 4.42.3
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.14.6
+ - Tokenizers 0.19.1
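
The card records the hyperparameters but not the training script itself. For orientation, a minimal sketch of how a run with these settings could be wired up with TRL's `DPOTrainer` and a PEFT LoRA adapter; the toy dataset, the `LoraConfig` values, and the output directory are placeholders, not taken from this commit:

```python
# Hypothetical reconstruction of the setup implied by the card's
# hyperparameters; the real script and dataset are not in this commit.
from datasets import Dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_name = "microsoft/Phi-3-mini-4k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Placeholder preference pairs; the card does not name the actual dataset.
train_dataset = Dataset.from_dict({
    "prompt": ["What is 2 + 2?"],
    "chosen": ["2 + 2 = 4."],
    "rejected": ["2 + 2 = 5."],
})

# Adapter settings are not recorded in the card; these are illustrative.
peft_config = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")

args = TrainingArguments(
    output_dir="phi_3-offline-dpo-noise-0.0-42",
    learning_rate=5e-6,
    per_device_train_batch_size=4,  # 4 per device x 4 GPUs x 4 accumulation = 64 effective
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
)

trainer = DPOTrainer(
    model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    peft_config=peft_config,
)
trainer.train()
```

With `peft_config` supplied and no explicit `ref_model`, TRL computes the reference log-probabilities by disabling the adapter, which is why only the adapter weights need to be saved in a commit like this one.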
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9230769230769231,
+   "total_flos": 0.0,
+   "train_loss": 0.6940749088923136,
+   "train_runtime": 53.4359,
+   "train_samples": 200,
+   "train_samples_per_second": 3.743,
+   "train_steps_per_second": 0.056
+ }
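
As a quick consistency check (using only the numbers in this file plus the step count from `trainer_state.json` below), the reported throughput follows directly from the runtime:

```python
# Recompute the reported throughput from the raw counts in this commit.
train_runtime = 53.4359  # seconds, from all_results.json
train_samples = 200
global_step = 3          # optimizer steps, from trainer_state.json

print(round(train_samples / train_runtime, 3))  # 3.743 -> train_samples_per_second
print(round(global_step / train_runtime, 3))    # 0.056 -> train_steps_per_second
```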
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.9230769230769231,
+   "total_flos": 0.0,
+   "train_loss": 0.6940749088923136,
+   "train_runtime": 53.4359,
+   "train_samples": 200,
+   "train_samples_per_second": 3.743,
+   "train_steps_per_second": 0.056
+ }
trainer_state.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9230769230769231,
+   "eval_steps": 100,
+   "global_step": 3,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.3076923076923077,
+       "grad_norm": 0.22362739856280167,
+       "learning_rate": 5e-06,
+       "logits/chosen": 11.287857055664062,
+       "logits/rejected": 11.603424072265625,
+       "logps/chosen": -380.2095642089844,
+       "logps/rejected": -391.62744140625,
+       "loss": 0.6931,
+       "rewards/accuracies": 0.0,
+       "rewards/chosen": 0.0,
+       "rewards/margins": 0.0,
+       "rewards/rejected": 0.0,
+       "step": 1
+     },
+     {
+       "epoch": 0.9230769230769231,
+       "step": 3,
+       "total_flos": 0.0,
+       "train_loss": 0.6940749088923136,
+       "train_runtime": 53.4359,
+       "train_samples_per_second": 3.743,
+       "train_steps_per_second": 0.056
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 3,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
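
The fractional `epoch` of 0.9230769… (= 12/13) is consistent with the batch geometry in the README, assuming the `Trainer` floors the per-epoch optimizer-step count and tracks epoch progress in micro-batches; a sketch of that arithmetic:

```python
import math

# Counts taken from the README and JSON files in this commit.
train_samples = 200
num_devices = 4
per_device_batch = 4
grad_accum = 4

# 200 samples over 16-sample micro-batches -> 13 micro-batches per epoch.
micro_batches = math.ceil(train_samples / (num_devices * per_device_batch))

# Flooring gives 3 optimizer steps, matching max_steps / global_step above.
max_steps = micro_batches // grad_accum

# 3 steps consume 12 of 13 micro-batches: epoch = 12/13 = 0.9230769230769231.
print(max_steps, (max_steps * grad_accum) / micro_batches)
```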