csikasote committed
Commit 0938039
1 Parent(s): 76a9d01

End of training

README.md CHANGED
@@ -2,6 +2,8 @@
 license: apache-2.0
 base_model: facebook/wav2vec2-xls-r-1b
 tags:
+- automatic-speech-recognition
+- BembaSpeech
 - generated_from_trainer
 metrics:
 - wer
@@ -16,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/cicasote/huggingface/runs/4a183449)
 # xls-r-1b-bem-fsv
 
-This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on an unknown dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the BEMBASPEECH - BEM dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.2680
 - Wer: 0.8798
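For quick context on what the updated card describes, here is a minimal, hedged inference sketch using the `transformers` ASR pipeline. The repository id `csikasote/xls-r-1b-bem-fsv` is inferred from the committer name and model heading above rather than stated in the diff, and `sample.wav` is a placeholder for a local 16 kHz mono recording.

```python
# Sketch: transcribe Bemba audio with the fine-tuned checkpoint described above.
# The repo id and audio path are assumptions, not confirmed by this commit.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/xls-r-1b-bem-fsv",  # assumed hub id: committer + model name
)

print(asr("sample.wav")["text"])  # placeholder audio file
```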
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 4.990403071017274,
+    "eval_loss": 0.26802200078964233,
+    "eval_runtime": 47.3898,
+    "eval_samples": 499,
+    "eval_samples_per_second": 10.53,
+    "eval_steps_per_second": 1.329,
+    "eval_wer": 0.8797595190380761,
+    "total_flos": 1.1936063667019008e+19,
+    "train_loss": 1.3302699947357177,
+    "train_runtime": 3428.0062,
+    "train_samples": 4163,
+    "train_samples_per_second": 6.072,
+    "train_steps_per_second": 0.379
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 4.990403071017274,
+    "eval_loss": 0.26802200078964233,
+    "eval_runtime": 47.3898,
+    "eval_samples": 499,
+    "eval_samples_per_second": 10.53,
+    "eval_steps_per_second": 1.329,
+    "eval_wer": 0.8797595190380761
+}
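The `eval_wer` field above is a word error rate (0.8798, matching the "Wer" line in the card). As a reference point, a small sketch of how a WER figure like this is typically computed with the `evaluate` library; the transcripts below are made-up placeholders, not BembaSpeech data.

```python
# Sketch: word error rate, the metric reported as eval_wer in eval_results.json.
# The reference/prediction strings are illustrative placeholders only.
import evaluate

wer_metric = evaluate.load("wer")

references = ["this is a reference transcript"]
predictions = ["this is the predicted transcript"]

print(wer_metric.compute(predictions=predictions, references=references))
# eval_results.json above reports eval_wer ≈ 0.8798 over 499 evaluation samples
```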
runs/Sep01_00-46-25_7b977e2c913a/events.out.tfevents.1725155320.7b977e2c913a.61687.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fa7d2190736cd194a894682069e8eccbbdec0f2747cd8dfa04a1ed3eae2969d
+size 406
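Note that the added `events.out.tfevents` file is stored as a Git LFS pointer; the three `+` lines above are the pointer, not the TensorBoard log itself. After `git lfs pull`, its scalars can be read back with TensorBoard's event reader. The sketch below uses the path from this commit and is otherwise a generic, unverified example.

```python
# Sketch: inspect the TensorBoard event file added in this commit.
# Requires `git lfs pull` first, since the repo stores only the LFS pointer.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

path = "runs/Sep01_00-46-25_7b977e2c913a/events.out.tfevents.1725155320.7b977e2c913a.61687.1"
acc = EventAccumulator(path)
acc.Reload()

for tag in acc.Tags()["scalars"]:
    events = acc.Scalars(tag)
    print(tag, [(e.step, e.value) for e in events][:5])
```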
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 4.990403071017274,
+    "total_flos": 1.1936063667019008e+19,
+    "train_loss": 1.3302699947357177,
+    "train_runtime": 3428.0062,
+    "train_samples": 4163,
+    "train_samples_per_second": 6.072,
+    "train_steps_per_second": 0.379
+}
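A quick consistency check on the throughput fields above: `train_samples_per_second` matches samples × configured epochs ÷ runtime. Treat that formula as an assumption about how the Trainer reports throughput; the arithmetic, at least, lines up with the values in this file.

```python
# Sanity check on train_results.json: derive the reported throughput.
# Assumes samples_per_second = train_samples * num_train_epochs / train_runtime.
train_samples = 4163
num_train_epochs = 5          # from trainer_state.json below
train_runtime = 3428.0062     # seconds

print(round(train_samples * num_train_epochs / train_runtime, 3))  # 6.072
```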
trainer_state.json ADDED
@@ -0,0 +1,102 @@
+{
+    "best_metric": 0.26802200078964233,
+    "best_model_checkpoint": "./xls-r-1b-bem-fsv/checkpoint-1000",
+    "epoch": 4.990403071017274,
+    "eval_steps": 500,
+    "global_step": 1300,
+    "is_hyper_param_search": false,
+    "is_local_process_zero": true,
+    "is_world_process_zero": true,
+    "log_history": [
+        {
+            "epoch": 0.7677543186180422,
+            "grad_norm": 7.090035915374756,
+            "learning_rate": 1.9600000000000002e-05,
+            "loss": 4.9139,
+            "step": 200
+        },
+        {
+            "epoch": 1.5355086372360844,
+            "grad_norm": 2.271395683288574,
+            "learning_rate": 3.960000000000001e-05,
+            "loss": 2.6967,
+            "step": 400
+        },
+        {
+            "epoch": 1.9193857965451055,
+            "eval_loss": 0.4789997339248657,
+            "eval_runtime": 47.1718,
+            "eval_samples_per_second": 10.578,
+            "eval_steps_per_second": 1.336,
+            "eval_wer": 0.9859719438877755,
+            "step": 500
+        },
+        {
+            "epoch": 2.3032629558541267,
+            "grad_norm": 1.4331015348434448,
+            "learning_rate": 4.4000000000000006e-05,
+            "loss": 0.3851,
+            "step": 600
+        },
+        {
+            "epoch": 3.071017274472169,
+            "grad_norm": 0.58216392993927,
+            "learning_rate": 3.15e-05,
+            "loss": 0.257,
+            "step": 800
+        },
+        {
+            "epoch": 3.838771593090211,
+            "grad_norm": 0.7515180110931396,
+            "learning_rate": 1.9e-05,
+            "loss": 0.1815,
+            "step": 1000
+        },
+        {
+            "epoch": 3.838771593090211,
+            "eval_loss": 0.26802200078964233,
+            "eval_runtime": 46.97,
+            "eval_samples_per_second": 10.624,
+            "eval_steps_per_second": 1.341,
+            "eval_wer": 0.8797595190380761,
+            "step": 1000
+        },
+        {
+            "epoch": 4.606525911708253,
+            "grad_norm": 0.4551451504230499,
+            "learning_rate": 6.5000000000000004e-06,
+            "loss": 0.1455,
+            "step": 1200
+        },
+        {
+            "epoch": 4.990403071017274,
+            "step": 1300,
+            "total_flos": 1.1936063667019008e+19,
+            "train_loss": 1.3302699947357177,
+            "train_runtime": 3428.0062,
+            "train_samples_per_second": 6.072,
+            "train_steps_per_second": 0.379
+        }
+    ],
+    "logging_steps": 200,
+    "max_steps": 1300,
+    "num_input_tokens_seen": 0,
+    "num_train_epochs": 5,
+    "save_steps": 500,
+    "stateful_callbacks": {
+        "TrainerControl": {
+            "args": {
+                "should_epoch_stop": false,
+                "should_evaluate": false,
+                "should_log": false,
+                "should_save": true,
+                "should_training_stop": true
+            },
+            "attributes": {}
+        }
+    },
+    "total_flos": 1.1936063667019008e+19,
+    "train_batch_size": 8,
+    "trial_name": null,
+    "trial_params": null
+}
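The `log_history` array above holds the full training trajectory: training loss every 200 steps and evaluation passes at steps 500 and 1000 (WER dropping from 0.986 to 0.880). A small sketch, assuming a local copy of the file, for pulling those curves out:

```python
# Sketch: extract loss and WER curves from the trainer_state.json added above.
# Assumes a local copy of the file; key names follow the JSON shown in this commit.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

print("train loss:", train_curve)  # e.g. [(200, 4.9139), (400, 2.6967), ...]
print("eval WER:", eval_curve)     # [(500, 0.9859...), (1000, 0.8797...)]
```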