rickystanley76 committed on
Commit 4df8e1d
1 Parent(s): af4c803

Upload model trained with Unsloth


Upload model trained with Unsloth 2x faster

Files changed (3)
  1. README.md +1 -1
  2. adapter_config.json +34 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -1,7 +1,7 @@
  ---
- license: apache-2.0
  language:
  - en
  library_name: transformers
+ license: apache-2.0
  ---
  The LLaMA-3.1-8B model has been fine-tuned on a limited dataset related to the quota protests in Bangladesh in July 2024. The primary objective of this model is to identify authentic information published by reputable sources such as BBC, DW, and other trustworthy channels. To ensure the model's continued accuracy and relevance, the dataset will be periodically updated, and the model will undergo subsequent fine-tuning. The dataset utilized for this purpose is sourced from rickystanley76/quota-bd-2024.
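
A minimal sketch of attaching this LoRA adapter to the 4-bit base model named in adapter_config.json, using transformers and PEFT (bitsandbytes assumed installed for the 4-bit weights). The adapter repo id is a placeholder, since this commit does not name the target repository.

```python
# Sketch: load the 4-bit base model and wrap it with the uploaded LoRA adapter.
# ADAPTER_REPO is a placeholder -- replace it with the actual Hub repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_REPO = "unsloth/meta-llama-3.1-8b-bnb-4bit"  # base_model_name_or_path in adapter_config.json
ADAPTER_REPO = "<username>/<adapter-repo>"        # placeholder, not stated in this commit

tokenizer = AutoTokenizer.from_pretrained(BASE_REPO)
base_model = AutoModelForCausalLM.from_pretrained(BASE_REPO, device_map="auto")

# Attach the LoRA weights stored in adapter_model.safetensors.
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)

prompt = "What did BBC and DW report about the July 2024 quota protests in Bangladesh?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```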
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "unsloth/meta-llama-3.1-8b-bnb-4bit",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "down_proj",
+     "o_proj",
+     "k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
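
For reference, a sketch of the equivalent PEFT LoraConfig, with the hyperparameter values copied from the JSON above; the variable name lora_config is illustrative.

```python
# Sketch: PEFT LoraConfig matching the adapter_config.json in this commit.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                      # LoRA rank
    lora_alpha=16,             # scaling factor
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[           # attention and MLP projections of the Llama blocks
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    use_rslora=False,
    use_dora=False,
)
```

With r=16 applied to all seven attention and MLP projections, the adapter remains compact; its weights are the roughly 168 MB adapter_model.safetensors added below.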
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9dcf08041a5127258ee20a33ae5a4c0da1b82f3533b9a9559966990d207a6ab
+ size 167832240