jorgeduardo13 committed on
Commit abab497 · 1 Parent(s): d157026

NLP Roberta Version! 🤗

Files changed (4):
  1. README.md +8 -7
  2. all_results.json +13 -0
  3. eval_results.json +9 -0
  4. train_results.json +7 -0
README.md CHANGED
@@ -2,6 +2,7 @@
 license: apache-2.0
 base_model: distilroberta-base
 tags:
+- text-classification
 - generated_from_trainer
 datasets:
 - glue
@@ -15,7 +16,7 @@ model-index:
       name: Text Classification
       type: text-classification
     dataset:
-      name: glue
+      name: datasetX
       type: glue
       config: mrpc
       split: validation
@@ -23,10 +24,10 @@ model-index:
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.8137254901960784
+      value: 0.7965686274509803
    - name: F1
      type: f1
-      value: 0.8685121107266436
+      value: 0.8482632541133455
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -34,11 +35,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 # platzi_nlp_model_roberta_similaritytext
 
-This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue dataset.
+This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the datasetX dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.3339
-- Accuracy: 0.8137
-- F1: 0.8685
+- Loss: 0.9276
+- Accuracy: 0.7966
+- F1: 0.8483
 
 ## Model description
 
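The updated card describes a sentence-pair (paraphrase) classifier fine-tuned from distilroberta-base on GLUE MRPC (type: glue, config: mrpc in the metadata). A minimal inference sketch follows; the hub path is assumed from the committer's namespace and the model name, and is not stated in this diff:

```python
# Minimal inference sketch (assumed hub path, not confirmed by this commit).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "jorgeduardo13/platzi_nlp_model_roberta_similaritytext"  # assumption
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

s1 = "The company said revenue will grow this year."
s2 = "Revenue is expected to grow this year, the company said."
inputs = tokenizer(s1, s2, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()  # in GLUE MRPC, label 1 = paraphrase, 0 = not
print(pred)
```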
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.7965686274509803,
+    "eval_f1": 0.8482632541133455,
+    "eval_loss": 0.9275822639465332,
+    "eval_runtime": 1.4492,
+    "eval_samples_per_second": 281.543,
+    "eval_steps_per_second": 35.193,
+    "train_loss": 0.13423540535056253,
+    "train_runtime": 158.7008,
+    "train_samples_per_second": 115.563,
+    "train_steps_per_second": 14.461
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.7965686274509803,
+    "eval_f1": 0.8482632541133455,
+    "eval_loss": 0.9275822639465332,
+    "eval_runtime": 1.4492,
+    "eval_samples_per_second": 281.543,
+    "eval_steps_per_second": 35.193
+}
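For context, eval_accuracy and eval_f1 above are the standard GLUE MRPC metric pair (accuracy plus F1 on the paraphrase class). A tiny sketch with the evaluate library, using toy predictions rather than this run's outputs:

```python
# Toy illustration of the GLUE MRPC metric pair; values are not from this run.
import evaluate

metric = evaluate.load("glue", "mrpc")
print(metric.compute(predictions=[1, 0, 1, 1], references=[1, 0, 0, 1]))
# {'accuracy': 0.75, 'f1': 0.8}
```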
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.13423540535056253,
+    "train_runtime": 158.7008,
+    "train_samples_per_second": 115.563,
+    "train_steps_per_second": 14.461
+}
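The three JSON files follow the layout written by transformers' Trainer.save_metrics: train_results.json and eval_results.json per split, plus the combined all_results.json. Below is a hedged sketch of a fine-tuning script that would produce files of this shape; it is not the author's actual script, and apart from the 5 epochs visible in the metrics, the settings are illustrative:

```python
# Sketch of a GLUE/MRPC fine-tuning run that writes train_results.json,
# eval_results.json and all_results.json via Trainer.save_metrics.
# Illustrative settings only; not the author's exact configuration.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

raw = load_dataset("glue", "mrpc")
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")

def preprocess(batch):
    # MRPC examples are sentence pairs; tokenize them jointly.
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True)

data = raw.map(preprocess, batched=True)
metric = evaluate.load("glue", "mrpc")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return metric.compute(predictions=np.argmax(logits, axis=-1), references=labels)

model = AutoModelForSequenceClassification.from_pretrained("distilroberta-base", num_labels=2)
args = TrainingArguments(output_dir="platzi_nlp_model_roberta_similaritytext",
                         num_train_epochs=5)  # 5 epochs matches "epoch": 5.0 above

trainer = Trainer(model=model, args=args,
                  train_dataset=data["train"], eval_dataset=data["validation"],
                  tokenizer=tokenizer, compute_metrics=compute_metrics)

train_result = trainer.train()
trainer.save_metrics("train", train_result.metrics)  # -> train_results.json (+ all_results.json)
eval_metrics = trainer.evaluate()
trainer.save_metrics("eval", eval_metrics)           # -> eval_results.json (+ all_results.json)
```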