base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true

load_in_8bit: true
load_in_4bit: false
strict: false

datasets:
  - path: jondurbin/airoboros-3.1
    type: sharegpt
dataset_prepared_path:
val_set_size: 0.01
output_dir: ./airoboros-lora-out

sequence_len: 2048
sample_packing: true

adapter: lora
lora_model_dir:
lora_r: 128
lora_alpha: 64
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project: airoboros-tinyllama
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 8
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002
adam_beta1: 0.999
adam_beta2: 0.95
adam_eps: 0.00001

train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: true

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 100
evals_per_epoch: 2
eval_table_size:
saves_per_epoch: 2
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
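
# --- Usage sketch ---
# A minimal way to run this config, assuming a recent axolotl install; the
# filename lora.yml below is hypothetical (use whatever this file is saved as):
#
#   accelerate launch -m axolotl.cli.train lora.yml
#
# The LoRA adapter is written to ./airoboros-lora-out (output_dir above).
# To fold it back into the base model for standalone inference:
#
#   python3 -m axolotl.cli.merge_lora lora.yml --lora_model_dir="./airoboros-lora-out"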