base_model: /root/autodl-tmp/c4/Meta-Llama-3-8B-Instruct-abliterated-v3
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
tokenizer_use_fast: false
load_in_8bit: false
load_in_4bit: false
strict: false
model_config:

datasets:
  - path: /root/autodl-tmp/c4/11.json
    type: sharegpt
    conversation: llama3
    roles:
      input: user
      output: assistant
  - path: /root/autodl-tmp/c4/LimaRP-augmented-8k-context.json
    type: sharegpt
    conversation: llama3
  - path: Sao10K/Short-Storygen-v2
    type:
      # The below are defaults; only set what's needed if you use a different column name.
      system_prompt: ""
      system_format: "{system}"
      field_instruction: prompt
      field_system: system
      field_output: response
    conversation: llama3
  - path: /root/autodl-tmp/c4/Claude-3-Opus-Instruct-15K/Claude3-Opus-Multi-Instruct-5K-v1.json
    type:
      # The below are defaults; only set what's needed if you use a different column name.
      system_prompt: ""
      system_format: "{system}"
      field_instruction: prompt
      #field_input: prompt
      field_output: response
    conversation: llama3
  - path: /root/autodl-tmp/c4/Claude-3-Opus-Instruct-15K/Opus_Instruct-v2-3.5K-Filtered-v2.json
    type:
      # The below are defaults; only set what's needed if you use a different column name.
      system_prompt: ""
      system_format: "{system}"
      field_instruction: prompt
      #field_input: prompt
      field_output: response
    conversation: llama3
  - path: /root/autodl-tmp/c4/Claude-3-Opus-Instruct-15K/Opus_Instruct-v2-6.5K-Filtered-v2.json
    type:
      # The below are defaults; only set what's needed if you use a different column name.
      system_prompt: ""
      system_format: "{system}"
      field_instruction: prompt
      #field_input: prompt
      field_output: response
    conversation: llama3
  - path: SicariusSicariiStuff/Bluemoon_Top50MB_Sorted_Fixed
    type: sharegpt
    conversation: llama3
  - path: /root/autodl-tmp/c4/clean-gpt2.json
    type: sharegpt
    conversation: llama3
  - path: /root/autodl-tmp/c4/LimaRP-augmented-8k-context.json
    type: sharegpt
    conversation: llama3

chat_template: llama3
dataset_prepared_path: /root/autodl-tmp/thingy
val_set_size: 0.005
output_dir: ./out

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

gradient_accumulation_steps: 4
micro_batch_size: 3
num_epochs: 5
logging_steps: 1
optimizer: adamw_8bit
lr_scheduler: cosine
learning_rate: 2e-5

wandb_project: llama3-8b-hiwaifu
wandb_watch:
wandb_run_id:
wandb_log_model:

train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
local_rank:
xformers_attention:
flash_attention: true

saves_per_epoch: 1
save_total_limit: 2
save_steps:
evals_per_epoch: 4
eval_sample_packing: false
debug:
deepspeed: /root/autodl-tmp/c4/axolotl/deepspeed_configs/zero3_bf16.json
weight_decay: 0.05
fsdp:
fsdp_config:
special_tokens:
  eos_token: "<|eot_id|>"
  pad_token: "<|end_of_text|>"
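
# Effective batch size: micro_batch_size (3) x gradient_accumulation_steps (4)
# = 12 sequences per GPU per optimizer step, multiplied by however many GPUs
# are launched under the ZeRO-3 DeepSpeed config above.
#
# A minimal launch sketch, assuming axolotl is installed and this file is
# saved as config.yaml (the exact CLI entrypoint can vary between axolotl
# versions):
#
#   accelerate launch -m axolotl.cli.train config.yaml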