automerger committed
Commit 76fb978
Parent: 2df3315

Upload folder using huggingface_hub

README.md CHANGED
@@ -6,14 +6,14 @@ tags:
 - lazymergekit
 - automerger
 base_model:
-- rwitz/experiment26-truthy-iter-2
+- yam-peleg/Experiment26-7B
 - nlpguy/AlloyIngotNeoY
 ---
 
 # Experiment26Alloyingotneoy-7B
 
 Experiment26Alloyingotneoy-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration.
-* [rwitz/experiment26-truthy-iter-2](https://huggingface.co/rwitz/experiment26-truthy-iter-2)
+* [yam-peleg/Experiment26-7B](https://huggingface.co/yam-peleg/Experiment26-7B)
 * [nlpguy/AlloyIngotNeoY](https://huggingface.co/nlpguy/AlloyIngotNeoY)
 
 ## 🧩 Configuration
@@ -21,12 +21,12 @@ Experiment26Alloyingotneoy-7B is an automated merge created by [Maxime Labonne](
 ```yaml
 slices:
 - sources:
-  - model: rwitz/experiment26-truthy-iter-2
+  - model: yam-peleg/Experiment26-7B
     layer_range: [0, 32]
   - model: nlpguy/AlloyIngotNeoY
     layer_range: [0, 32]
 merge_method: slerp
-base_model: rwitz/experiment26-truthy-iter-2
+base_model: yam-peleg/Experiment26-7B
 parameters:
   t:
   - filter: self_attn
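For orientation, a minimal usage sketch for the merged model with 🤗 Transformers. The repo id below is a hypothetical guess inferred from the model name; this commit does not state where the merged weights are published.

```python
# Sketch only: the repo id is an assumption, not confirmed by this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "automerger/Experiment26Alloyingotneoy-7B"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("What is a model merge?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```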
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "rwitz/experiment26-truthy-iter-2",
+  "_name_or_path": "yam-peleg/Experiment26-7B",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.38.2",
+  "transformers_version": "4.39.0",
   "use_cache": true,
   "vocab_size": 32000
 }
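A quick sanity check of the updated config after pulling this commit; a minimal sketch assuming the repo is checked out locally so `config.json` is in the working directory:

```python
import json

# Read config.json from a local checkout of this repo.
with open("config.json") as f:
    cfg = json.load(f)

assert cfg["_name_or_path"] == "yam-peleg/Experiment26-7B"
assert cfg["architectures"] == ["MistralForCausalLM"]
assert cfg["torch_dtype"] == "bfloat16"
print(cfg["transformers_version"])  # "4.39.0" after this commit
```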
mergekit_config.yml CHANGED
@@ -1,12 +1,12 @@
 
 slices:
 - sources:
-  - model: rwitz/experiment26-truthy-iter-2
+  - model: yam-peleg/Experiment26-7B
     layer_range: [0, 32]
   - model: nlpguy/AlloyIngotNeoY
     layer_range: [0, 32]
 merge_method: slerp
-base_model: rwitz/experiment26-truthy-iter-2
+base_model: yam-peleg/Experiment26-7B
 parameters:
   t:
   - filter: self_attn
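The `merge_method: slerp` above interpolates each pair of weight tensors along the unit sphere rather than linearly. A minimal NumPy sketch of the interpolation itself; mergekit's actual implementation adds per-tensor handling and the `t` schedules attached to `filter` entries like the `self_attn` one shown in the config:

```python
import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Spherical linear interpolation between two weight tensors."""
    # Normalized copies are used only to measure the angle between tensors.
    v0_u = v0 / (np.linalg.norm(v0) + eps)
    v1_u = v1 / (np.linalg.norm(v1) + eps)
    dot = float(np.clip(np.sum(v0_u * v1_u), -1.0, 1.0))

    # Nearly colinear tensors: fall back to plain linear interpolation.
    if abs(dot) > 0.9995:
        return (1.0 - t) * v0 + t * v1

    theta = np.arccos(dot)   # angle between the two tensors
    sin_theta = np.sin(theta)
    return (np.sin((1.0 - t) * theta) / sin_theta) * v0 \
         + (np.sin(t * theta) / sin_theta) * v1
```

With `base_model: yam-peleg/Experiment26-7B`, `t = 0` keeps the base tensor and `t = 1` takes the nlpguy/AlloyIngotNeoY tensor; the `filter: self_attn` entry lets attention weights follow their own `t` schedule.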
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:566d13bec08df5cc311985485c2074b32b93fbf596e6dbd0394b345a6490cb25
+oid sha256:31f010f01335cd31e48b795eda4e3e7faeaf5a923249f10d456274da8c9b1ea1
 size 9942981696
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be81eca9b00b2f117af9d336e89c114f0e331ed28e513f9b84c97c3d7204137f
+oid sha256:59fd958e8bf73048d6849934d6387f90e4fa21edf2029ea91563b66bdd1e123a
 size 4540516344
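The two `.safetensors` entries are Git LFS pointer files: the `oid sha256:` line is the checksum of the actual weight shard and `size` is its byte count. A minimal sketch of verifying a downloaded shard against the pointer from this commit:

```python
import hashlib

def file_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so multi-GB shards don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# New oid from this commit for the first shard.
expected = "31f010f01335cd31e48b795eda4e3e7faeaf5a923249f10d456274da8c9b1ea1"
assert file_sha256("model-00001-of-00002.safetensors") == expected
```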