Divyanshh committed on
Commit 0e654af
1 Parent(s): 0aa5cb8

trained on kaggle

Files changed (3)
  1. README.md +2 -3
  2. adapter_config.json +1 -1
  3. adapter_model.safetensors +1 -1
README.md CHANGED
@@ -18,7 +18,6 @@ base_model: bigscience/bloom-560m
 
 
 - **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
 - **Shared by [optional]:** [More Information Needed]
 - **Model type:** [More Information Needed]
 - **Language(s) (NLP):** [More Information Needed]
@@ -77,7 +76,7 @@ Use the code below to get started with the model.
 
 ### Training Data
 
-<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
 
 [More Information Needed]
 
@@ -108,7 +107,7 @@ Use the code below to get started with the model.
 
 #### Testing Data
 
-<!-- This should link to a Dataset Card if possible. -->
+<!-- This should link to a Data Card if possible. -->
 
 [More Information Needed]
 
adapter_config.json CHANGED
@@ -9,7 +9,7 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "lora_alpha": 32,
-  "lora_dropout": 0.05,
+  "lora_dropout": 0.1,
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9db0011b68a12b958a404becad7ca46aed29f5ac6cb40e102ea2ae7805d299d
+oid sha256:89ef889ba733a69df985da214315ee18f1ca9893743587534853aaced80b3c5a
 size 6298560
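
For context, this commit raises the LoRA dropout from 0.05 to 0.1 and swaps in the adapter weights (adapter_model.safetensors) produced by the Kaggle training run. Below is a minimal sketch of how such an adapter could be loaded with PEFT on the base model bigscience/bloom-560m named in the README; the repo id "Divyanshh/<adapter-repo>" is a placeholder, not something confirmed by this commit.

```python
# Sketch only: load a LoRA adapter described by adapter_config.json
# (r=16, lora_alpha=32, lora_dropout=0.1) on top of bigscience/bloom-560m.
# "Divyanshh/<adapter-repo>" is a placeholder repo id, not part of this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")

# PeftModel.from_pretrained reads adapter_config.json and the safetensors
# weights (the LFS object whose sha256 changed in this commit).
model = PeftModel.from_pretrained(base, "Divyanshh/<adapter-repo>")

inputs = tokenizer("Hello, ", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```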