# mergekit configuration: Superswallow-7b-v0.3
# Four-stage merge pipeline (multi-document YAML):
#   1. dare_ties: tulu-2-dpo-7b (base) + Swallow-7b-NVE-instruct -> v0.3-NVE
#   2. dare_ties: Swallow-7b-instruct (base) + v0.3-NVE (uniform per-filter weights) -> v0.3-base
#   3. dare_ties: Swallow-7b-instruct (base) + v0.3-NVE (per-layer weight vectors) -> v0.3-flavor
#   4. slerp:     v0.3-base + v0.3-flavor across all 32 layers -> Superswallow-7b-v0.3
---
models:
  - model: allenai/tulu-2-dpo-7b
    # no parameters necessary for base model
  - model: tokyotech-llm/Swallow-7b-NVE-instruct-hf  # Japanese language skills
    parameters:
      density: 1
      weight: 0.40
merge_method: dare_ties
base_model: allenai/tulu-2-dpo-7b
dtype: bfloat16
name: Superswallow-7b-v0.3-NVE
---
models:
  - model: tokyotech-llm/Swallow-7b-instruct-hf
    # no parameters necessary for base model
  - model: Superswallow-7b-v0.3-NVE
    parameters:
      density: 1
      weight:
        - filter: mlp
          value: 0.1
        - filter: self_attn
          value: 0.6
        - value: 0  # fallback for rest of tensors
merge_method: dare_ties
base_model: tokyotech-llm/Swallow-7b-instruct-hf
dtype: bfloat16
tokenizer_source: union
name: Superswallow-7b-v0.3-base
---
models:
  - model: tokyotech-llm/Swallow-7b-instruct-hf
    # no parameters necessary for base model
  - model: Superswallow-7b-v0.3-NVE
    parameters:
      density: 1
      weight:
        # per-layer-group weights, alternating emphasis between filters
        - filter: mlp
          value: [0.6, 0.1, 0.6, 0.1, 0.6, 0.1, 0.6, 0.1, 0.1]
        - filter: self_attn
          value: [0.6, 0.6, 0.1, 0.6, 0.1, 0.6, 0.1, 0.6, 0.6]
        - value: 0  # fallback for rest of tensors
merge_method: dare_ties
base_model: tokyotech-llm/Swallow-7b-instruct-hf
dtype: bfloat16
tokenizer_source: union
name: Superswallow-7b-v0.3-flavor
---
slices:
  - sources:
      - model: Superswallow-7b-v0.3-base
        layer_range: [0, 32]
      - model: Superswallow-7b-v0.3-flavor
        layer_range: [0, 32]
merge_method: slerp
base_model: Superswallow-7b-v0.3-base
parameters:
  # model stabilization: interpolation factor t varies across layer depth
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5  # fallback for rest of tensors
dtype: bfloat16
name: Superswallow-7b-v0.3