lailamt committed on
Commit
1c15f62
1 Parent(s): b446a0b

Training in progress, step 100

Files changed (4)
  1. README.md +35 -49
  2. config.json +16 -16
  3. model.safetensors +2 -2
  4. training_args.bin +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  license: mit
- base_model: neuralmind/bert-base-portuguese-cased
+ base_model: PORTULAN/albertina-ptbr-base
  tags:
  - generated_from_trainer
  model-index:
@@ -13,9 +13,9 @@ should probably proofread and complete it, then remove this comment. -->

  # e3_lr2e-05

- This model is a fine-tuned version of [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) on an unknown dataset.
+ This model is a fine-tuned version of [PORTULAN/albertina-ptbr-base](https://huggingface.co/PORTULAN/albertina-ptbr-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.5717
+ - Loss: 0.9281

  ## Model description

@@ -35,11 +35,11 @@ More information needed

  The following hyperparameters were used during training:
  - learning_rate: 2e-05
- - train_batch_size: 8
- - eval_batch_size: 16
+ - train_batch_size: 16
+ - eval_batch_size: 32
  - seed: 42
  - gradient_accumulation_steps: 16
- - total_train_batch_size: 128
+ - total_train_batch_size: 256
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
  - num_epochs: 3
@@ -49,53 +49,39 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 2.2771 | 0.0707 | 100 | 1.9875 |
- | 2.0486 | 0.1414 | 200 | 1.8946 |
- | 1.993 | 0.2121 | 300 | 1.8415 |
- | 1.9532 | 0.2828 | 400 | 1.8133 |
- | 1.9145 | 0.3535 | 500 | 1.7807 |
- | 1.8872 | 0.4242 | 600 | 1.7534 |
- | 1.8593 | 0.4949 | 700 | 1.7357 |
- | 1.8447 | 0.5656 | 800 | 1.7173 |
- | 1.8149 | 0.6363 | 900 | 1.7074 |
- | 1.7966 | 0.7070 | 1000 | 1.7036 |
- | 1.8034 | 0.7777 | 1100 | 1.6883 |
- | 1.7854 | 0.8484 | 1200 | 1.6740 |
- | 1.7779 | 0.9191 | 1300 | 1.6642 |
- | 1.7706 | 0.9897 | 1400 | 1.6582 |
- | 1.7723 | 1.0604 | 1500 | 1.6475 |
- | 1.746 | 1.1311 | 1600 | 1.6463 |
- | 1.7386 | 1.2018 | 1700 | 1.6399 |
- | 1.7319 | 1.2725 | 1800 | 1.6385 |
- | 1.7292 | 1.3432 | 1900 | 1.6230 |
- | 1.7121 | 1.4139 | 2000 | 1.6204 |
- | 1.7245 | 1.4846 | 2100 | 1.6152 |
- | 1.7159 | 1.5553 | 2200 | 1.6103 |
- | 1.7232 | 1.6260 | 2300 | 1.6114 |
- | 1.6952 | 1.6967 | 2400 | 1.6099 |
- | 1.6944 | 1.7674 | 2500 | 1.6012 |
- | 1.6991 | 1.8381 | 2600 | 1.5970 |
- | 1.6954 | 1.9088 | 2700 | 1.5933 |
- | 1.698 | 1.9795 | 2800 | 1.5918 |
- | 1.6857 | 2.0502 | 2900 | 1.5915 |
- | 1.6783 | 2.1209 | 3000 | 1.5840 |
- | 1.679 | 2.1916 | 3100 | 1.5817 |
- | 1.6796 | 2.2623 | 3200 | 1.5835 |
- | 1.6709 | 2.3330 | 3300 | 1.5769 |
- | 1.6626 | 2.4037 | 3400 | 1.5819 |
- | 1.6732 | 2.4744 | 3500 | 1.5824 |
- | 1.6726 | 2.5458 | 3600 | 1.5720 |
- | 1.6822 | 2.6165 | 3700 | 1.5758 |
- | 1.6578 | 2.6872 | 3800 | 1.5739 |
- | 1.6756 | 2.7579 | 3900 | 1.5743 |
- | 1.6747 | 2.8286 | 4000 | 1.5695 |
- | 1.659 | 2.8993 | 4100 | 1.5713 |
- | 1.6587 | 2.9700 | 4200 | 1.5750 |
+ | 1.4061 | 0.1040 | 100 | 1.1920 |
+ | 1.2553 | 0.2080 | 200 | 1.1209 |
+ | 1.2102 | 0.3120 | 300 | 1.0971 |
+ | 1.1773 | 0.4160 | 400 | 1.0738 |
+ | 1.1432 | 0.5200 | 500 | 1.0481 |
+ | 1.1302 | 0.6240 | 600 | 1.0320 |
+ | 1.1153 | 0.7280 | 700 | 1.0243 |
+ | 1.1057 | 0.8320 | 800 | 1.0107 |
+ | 1.0976 | 0.9360 | 900 | 1.0002 |
+ | 1.0889 | 1.0400 | 1000 | 0.9907 |
+ | 1.0797 | 1.1440 | 1100 | 0.9836 |
+ | 1.0633 | 1.2480 | 1200 | 0.9788 |
+ | 1.0582 | 1.3521 | 1300 | 0.9761 |
+ | 1.0578 | 1.4561 | 1400 | 0.9635 |
+ | 1.0423 | 1.5601 | 1500 | 0.9601 |
+ | 1.0411 | 1.6641 | 1600 | 0.9578 |
+ | 1.0406 | 1.7681 | 1700 | 0.9527 |
+ | 1.0436 | 1.8721 | 1800 | 0.9520 |
+ | 1.0363 | 1.9761 | 1900 | 0.9443 |
+ | 1.0274 | 2.0801 | 2000 | 0.9419 |
+ | 1.03 | 2.1841 | 2100 | 0.9417 |
+ | 1.0232 | 2.2881 | 2200 | 0.9392 |
+ | 1.0237 | 2.3921 | 2300 | 0.9374 |
+ | 1.0199 | 2.4961 | 2400 | 0.9354 |
+ | 1.0095 | 2.6001 | 2500 | 0.9399 |
+ | 1.0145 | 2.7041 | 2600 | 0.9343 |
+ | 1.0179 | 2.8081 | 2700 | 0.9297 |
+ | 1.0148 | 2.9121 | 2800 | 0.9328 |


  ### Framework versions

  - Transformers 4.41.2
  - Pytorch 2.3.0+cu121
- - Datasets 2.20.0
+ - Datasets 2.19.2
  - Tokenizers 0.19.1
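The updated card describes a masked-language model fine-tuned from PORTULAN/albertina-ptbr-base. As a minimal, hedged sketch of how such a checkpoint is typically queried (the repo id `lailamt/e3_lr2e-05` is an assumption built from the author and run name, not stated in the commit):

```python
from transformers import pipeline

# Assumed repo id (author + run name); replace with the actual hub path of this checkpoint.
fill_mask = pipeline("fill-mask", model="lailamt/e3_lr2e-05")

# Use the tokenizer's own mask token rather than hardcoding one.
text = f"Lisboa é a capital de {fill_mask.tokenizer.mask_token}."
for pred in fill_mask(text, top_k=3):
    print(pred["token_str"], round(pred["score"], 4))
```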
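The hyperparameters listed in the card map directly onto `transformers.TrainingArguments`. The sketch below reproduces them assuming a single training device (per-device batch size 16 × gradient accumulation 16 = 256 total), with the evaluation cadence inferred from the results table (one row per 100 steps); the output directory and any `Trainer` wiring are placeholders, not the author's script.

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="e3_lr2e-05",          # placeholder output directory
    learning_rate=2e-05,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=16,   # 16 * 16 = 256 total train batch size on one device
    num_train_epochs=3,
    lr_scheduler_type="linear",
    seed=42,
    # Adam betas=(0.9, 0.999) and epsilon=1e-08 are the defaults, so they are not set explicitly.
    evaluation_strategy="steps",      # inferred from the eval table (one entry every 100 steps)
    eval_steps=100,
    logging_steps=100,
)
```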
config.json CHANGED
@@ -1,32 +1,32 @@
  {
- "_name_or_path": "neuralmind/bert-base-portuguese-cased",
+ "_name_or_path": "PORTULAN/albertina-ptbr-base",
  "architectures": [
- "BertForMaskedLM"
+ "DebertaForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
- "classifier_dropout": null,
- "directionality": "bidi",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
- "layer_norm_eps": 1e-12,
+ "layer_norm_eps": 1e-07,
  "max_position_embeddings": 512,
- "model_type": "bert",
+ "max_relative_positions": -1,
+ "model_type": "deberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
- "output_past": true,
  "pad_token_id": 0,
- "pooler_fc_size": 768,
- "pooler_num_attention_heads": 12,
- "pooler_num_fc_layers": 3,
- "pooler_size_per_head": 128,
- "pooler_type": "first_token_transform",
- "position_embedding_type": "absolute",
+ "pooler_dropout": 0,
+ "pooler_hidden_act": "gelu",
+ "pooler_hidden_size": 768,
+ "pos_att_type": [
+ "c2p",
+ "p2c"
+ ],
+ "position_biased_input": false,
+ "relative_attention": true,
  "torch_dtype": "float32",
  "transformers_version": "4.41.2",
- "type_vocab_size": 2,
- "use_cache": true,
- "vocab_size": 29794
+ "type_vocab_size": 0,
+ "vocab_size": 50265
  }
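The config change swaps the architecture from `BertForMaskedLM` to `DebertaForMaskedLM`, with disentangled relative attention and a 50,265-token vocabulary. A small sketch for checking that a downloaded copy of the checkpoint instantiates as this config describes (again assuming the hypothetical repo id `lailamt/e3_lr2e-05`):

```python
from transformers import AutoConfig, AutoModelForMaskedLM

repo_id = "lailamt/e3_lr2e-05"  # assumed repo id, not stated in the commit

config = AutoConfig.from_pretrained(repo_id)
print(config.model_type)        # expected: "deberta"
print(config.vocab_size)        # expected: 50265
print(config.relative_attention, config.pos_att_type)  # expected: True, ["c2p", "p2c"]

# Loads the weights from model.safetensors and builds the masked-LM head.
model = AutoModelForMaskedLM.from_pretrained(repo_id)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```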
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17ebf513f7657ce3e8ef9de2b939cba1f3280e14c50800e9ec92f041e0cd51ba
- size 435841560
+ oid sha256:a9c940a02ad0318cfbd92e63e03200a28dc5baf81062423a5ef68982600a3845
+ size 557000804
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e3113e6a7f4491d0a9edd764cc2aacd41d4459f47702bd7038bbb727e5a8cbb7
+ oid sha256:6625608206e0211e37aea30e8795c5d9ac17a3078892d73782e5c65f5ae941b0
  size 5240
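training_args.bin is the pickled `TrainingArguments` object that the `Trainer` saves next to the weights. A hedged sketch of inspecting it against the card, assuming a Transformers install compatible with 4.41.2 (newer PyTorch versions require `weights_only=False` to unpickle it):

```python
import torch

# Unpickles the TrainingArguments saved for this run.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate)                 # expected: 2e-05
print(args.per_device_train_batch_size)   # expected: 16
print(args.gradient_accumulation_steps)   # expected: 16
print(args.num_train_epochs)              # expected: 3
```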