{
  "model_name": "NousResearch/Llama-2-7b-chat-hf",
  "data_path": "aboonaji/alpaca_micro_demo",
  "train_split": "train",
  "valid_split": null,
  "text_column": "text",
  "huggingface_token": null,
  "learning_rate": 0.0002,
  "num_train_epochs": 1,
  "train_batch_size": 2,
  "eval_batch_size": 4,
  "warmup_ratio": 0.1,
  "gradient_accumulation_steps": 1,
  "optimizer": "adamw_torch",
  "scheduler": "linear",
  "weight_decay": 0.0,
  "max_grad_norm": 1.0,
  "seed": 42,
  "add_eos_token": false,
  "block_size": -1,
  "use_peft": true,
  "lora_r": 16,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "training_type": "generic",
  "train_on_inputs": false,
  "logging_steps": -1,
  "project_name": "llam",
  "evaluation_strategy": "epoch",
  "save_total_limit": 1,
  "save_strategy": "epoch",
  "auto_find_batch_size": false,
  "fp16": false,
  "push_to_hub": true,
  "use_int8": false,
  "model_max_length": 1024,
  "repo_id": "Abners/Llama-2-7b-chat-hf-finetune_Final",
  "use_int4": true,
  "trainer": "sft",
  "target_modules": null
}
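
These keys describe a standard QLoRA-style SFT run: the base model is loaded in 4-bit (`use_int4`), LoRA adapters are attached with the given rank, alpha, and dropout (`use_peft`, `lora_r`, `lora_alpha`, `lora_dropout`), and an SFT trainer fits on the dataset's `text` column. The following is a minimal sketch of roughly what this config does, written against the 2023-era `transformers`/`peft`/`trl` APIs. It is an illustration under those assumptions, not the tool's actual internals, and the variable names are my own.

```python
# Minimal QLoRA SFT sketch mirroring the JSON config above.
# Assumes transformers, peft, trl (pre-SFTConfig API), bitsandbytes, datasets.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from trl import SFTTrainer

model_name = "NousResearch/Llama-2-7b-chat-hf"

# "use_int4": true -> load the base model quantized to 4-bit (QLoRA-style).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=bnb_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # Llama has no pad token by default

# "use_peft": true with the LoRA hyperparameters from the config.
# "target_modules": null -> fall back to peft's defaults for Llama.
peft_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM"
)

# Trainer-level settings lifted from the JSON ("project_name" as output dir).
training_args = TrainingArguments(
    output_dir="llam",
    learning_rate=2e-4,
    num_train_epochs=1,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=4,
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    save_strategy="epoch",
    save_total_limit=1,
    fp16=False,
    push_to_hub=True,  # requires `huggingface-cli login` or an HF token
    hub_model_id="Abners/Llama-2-7b-chat-hf-finetune_Final",
)

# "data_path" / "train_split" from the config.
dataset = load_dataset("aboonaji/alpaca_micro_demo", split="train")

trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",  # "text_column": "text"
    max_seq_length=1024,        # "model_max_length": 1024
    tokenizer=tokenizer,
)
trainer.train()
```

One wrinkle worth noting: the config sets `"evaluation_strategy": "epoch"` but leaves `"valid_split"` as `null`, so there is no eval set and the evaluation arguments are omitted from the sketch. With an eval split present, you would add `evaluation_strategy="epoch"` to `TrainingArguments` and pass an `eval_dataset` to the trainer.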