SidharthanRajendran committed on
Commit
2c9d5de
1 Parent(s): 086266a

albert-spam-sms-classification-finetuned

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ license: apache-2.0
+ base_model: albert-base-v2
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: training_dir
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # training_dir
+
+ This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0393
+ - Accuracy: 0.9946
+ - F1 Score: 0.9946
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Score |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|
+ | No log        | 1.0   | 244  | 0.1070          | 0.9785   | 0.9785   |
+ | No log        | 2.0   | 488  | 0.0673          | 0.9880   | 0.9880   |
+ | 0.0885        | 3.0   | 732  | 0.0293          | 0.9946   | 0.9946   |
+ | 0.0885        | 4.0   | 976  | 0.0280          | 0.9964   | 0.9964   |
+ | 0.0306        | 5.0   | 1220 | 0.0355          | 0.9952   | 0.9952   |
+ | 0.0306        | 6.0   | 1464 | 0.0364          | 0.9952   | 0.9952   |
+ | 0.0087        | 7.0   | 1708 | 0.0448          | 0.9946   | 0.9946   |
+ | 0.0087        | 8.0   | 1952 | 0.0618          | 0.9922   | 0.9922   |
+ | 0.0047        | 9.0   | 2196 | 0.0420          | 0.9946   | 0.9946   |
+ | 0.0047        | 10.0  | 2440 | 0.0393          | 0.9946   | 0.9946   |
+
+
+ ### Framework versions
+
+ - Transformers 4.33.2
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.13.3
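
Going by the commit message, the card above describes an SMS spam classifier fine-tuned from ALBERT. A minimal inference sketch follows; the Hub repository id is an assumption taken from the commit message, and since the accompanying `config.json` defines no `id2label` mapping, the pipeline will surface the generic `LABEL_0`/`LABEL_1` names.

```python
from transformers import pipeline

# Assumed repository id (derived from the commit message); substitute the actual Hub path.
MODEL_ID = "SidharthanRajendran/albert-spam-sms-classification-finetuned"

# config.json declares AlbertForSequenceClassification with
# problem_type "single_label_classification", so the text-classification
# pipeline softmaxes the two output logits into class probabilities.
classifier = pipeline("text-classification", model=MODEL_ID)

print(classifier("Congratulations! You have won a free prize. Call now to claim."))
# With no id2label in config.json the output uses generic names,
# e.g. [{'label': 'LABEL_1', 'score': 0.99...}]
```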
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "albert-base-v2",
+   "architectures": [
+     "AlbertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0,
+   "bos_token_id": 2,
+   "classifier_dropout_prob": 0.1,
+   "down_scale_factor": 1,
+   "embedding_size": 128,
+   "eos_token_id": 3,
+   "gap_size": 0,
+   "hidden_act": "gelu_new",
+   "hidden_dropout_prob": 0,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "inner_group_num": 1,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "albert",
+   "net_structure_type": 0,
+   "num_attention_heads": 12,
+   "num_hidden_groups": 1,
+   "num_hidden_layers": 12,
+   "num_memory_blocks": 0,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.2",
+   "type_vocab_size": 2,
+   "vocab_size": 30000
+ }
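
This configuration carries no explicit `id2label`/`label2id`, so `num_labels` falls back to the transformers default of 2, which matches a binary head, and `problem_type: "single_label_classification"` selects a cross-entropy loss. A hedged sketch of how these fields map onto the classification head, with an assumed ham/spam label naming, is below.

```python
from transformers import AlbertConfig, AlbertForSequenceClassification

# Start from the base config; the fine-tuned checkpoint's config.json above
# keeps the same architecture hyperparameters.
config = AlbertConfig.from_pretrained("albert-base-v2")

# Assumed label names -- the config.json in this commit does not store a mapping.
config.num_labels = 2
config.id2label = {0: "ham", 1: "spam"}
config.label2id = {"ham": 0, "spam": 1}
config.classifier_dropout_prob = 0.1  # matches the value in the config above

model = AlbertForSequenceClassification.from_pretrained("albert-base-v2", config=config)
# A Linear(768 -> 2) head on top of the pooled output:
print(model.classifier)
```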
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb504e39b69e2d854684517dc1e6b273899f7a9fe037f98348f762a8689aad1
+ size 46749762
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "[SEP]",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "eos_token": "[SEP]",
+   "keep_accents": false,
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "remove_space": true,
+   "sep_token": "[SEP]",
+   "tokenizer_class": "AlbertTokenizer",
+   "unk_token": "<unk>"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bb6dd687a5dd93520de7779f88f8e9a5ed2800a0cc50102c9b702c37840e958
+ size 4027
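
`training_args.bin` is a serialized `TrainingArguments` object and is not readable from this diff, but the hyperparameters reported in the README above can be approximated with the sketch below. The dataset, metric wiring, and per-epoch evaluation strategy are assumptions (the card lists the training data as unknown); only the listed hyperparameter values come from the card.

```python
from transformers import AlbertForSequenceClassification, AlbertTokenizer, TrainingArguments

# Hyperparameters mirrored from the README; the output directory name comes from
# the card's "training_dir" title, everything else below is illustrative.
args = TrainingArguments(
    output_dir="training_dir",
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    num_train_epochs=10,
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    seed=42,
    evaluation_strategy="epoch",  # assumption: the results table shows one eval per epoch
)

model = AlbertForSequenceClassification.from_pretrained("albert-base-v2", num_labels=2)
tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")

# A Trainer would then be wired up with the (unknown) SMS spam dataset:
# trainer = Trainer(model=model, args=args, train_dataset=train_ds,
#                   eval_dataset=eval_ds, tokenizer=tokenizer,
#                   compute_metrics=compute_metrics)
# trainer.train()
```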