feat(trl): improve eval dataset handling and documentation
#4 opened by evalstate (HF Staff)

Files changed:
- .gitattributes +59 -0
- README.md +3 -6
- dataset_inspector.py +0 -416
- trl/SKILL.md +48 -165
- trl/references/gguf_conversion.md +142 -181
- trl/references/reliability_principles.md +0 -371
- trl/references/trackio_guide.md +19 -8
- trl/references/training_methods.md +34 -4
- trl/references/training_patterns.md +1 -1
- trl/references/troubleshooting.md +3 -10
- trl/references/uv_scripts_guide.md +414 -0
- trl/scripts/convert_to_gguf.py +55 -104
- trl/scripts/train_dpo_example.py +2 -2
- trl/scripts/train_grpo_example.py +2 -2
- trl/scripts/train_sft_example.py +2 -2
- trl/scripts/validate_dataset.py +175 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.lz4 filter=lfs diff=lfs merge=lfs -text
+*.mds filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+# Audio files - uncompressed
+*.pcm filter=lfs diff=lfs merge=lfs -text
+*.sam filter=lfs diff=lfs merge=lfs -text
+*.raw filter=lfs diff=lfs merge=lfs -text
+# Audio files - compressed
+*.aac filter=lfs diff=lfs merge=lfs -text
+*.flac filter=lfs diff=lfs merge=lfs -text
+*.mp3 filter=lfs diff=lfs merge=lfs -text
+*.ogg filter=lfs diff=lfs merge=lfs -text
+*.wav filter=lfs diff=lfs merge=lfs -text
+# Image files - uncompressed
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.tiff filter=lfs diff=lfs merge=lfs -text
+# Image files - compressed
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
+# Video files - compressed
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,3 @@
----
-
----
-
-
-working here `https://github.com/evalstate/skills-dev`
+---
+license: mit
+---
dataset_inspector.py DELETED
@@ -1,416 +0,0 @@
-#!/usr/bin/env python3
-# /// script
-# dependencies = []
-# ///
-"""
-Dataset Format Inspector for TRL Training (LLM-Optimized Output)
-
-Inspects Hugging Face datasets to determine TRL training compatibility.
-Uses Datasets Server API for instant results - no dataset download needed!
-
-ULTRA-EFFICIENT: Uses HF Datasets Server API - completes in <2 seconds.
-
-Usage with HF Jobs:
-    hf_jobs("uv", {
-        "script": "https://huggingface.co/datasets/evalstate/trl-helpers/raw/main/dataset_inspector.py",
-        "script_args": ["--dataset", "your/dataset", "--split", "train"]
-    })
-"""
-
-import argparse
-import sys
-import json
-import urllib.request
-import urllib.parse
-from typing import List, Dict, Any
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Inspect dataset format for TRL training")
-    parser.add_argument("--dataset", type=str, required=True, help="Dataset name")
-    parser.add_argument("--split", type=str, default="train", help="Dataset split (default: train)")
-    parser.add_argument("--config", type=str, default="default", help="Dataset config name (default: default)")
-    parser.add_argument("--preview", type=int, default=150, help="Max chars per field preview")
-    parser.add_argument("--samples", type=int, default=5, help="Number of samples to fetch (default: 5)")
-    parser.add_argument("--json-output", action="store_true", help="Output as JSON")
-    return parser.parse_args()
-
-
-def api_request(url: str) -> Dict:
-    """Make API request to Datasets Server"""
-    try:
-        with urllib.request.urlopen(url, timeout=10) as response:
-            return json.loads(response.read().decode())
-    except urllib.error.HTTPError as e:
-        if e.code == 404:
-            return None
-        raise Exception(f"API request failed: {e.code} {e.reason}")
-    except Exception as e:
-        raise Exception(f"API request failed: {str(e)}")
-
-
-def get_splits(dataset: str) -> Dict:
-    """Get available splits for dataset"""
-    url = f"https://datasets-server.huggingface.co/splits?dataset={urllib.parse.quote(dataset)}"
-    return api_request(url)
-
-
-def get_rows(dataset: str, config: str, split: str, offset: int = 0, length: int = 5) -> Dict:
-    """Get rows from dataset"""
-    url = f"https://datasets-server.huggingface.co/rows?dataset={urllib.parse.quote(dataset)}&config={config}&split={split}&offset={offset}&length={length}"
-    return api_request(url)
-
-
-def find_columns(columns: List[str], patterns: List[str]) -> List[str]:
-    """Find columns matching patterns"""
-    return [c for c in columns if any(p in c.lower() for p in patterns)]
-
-
-def check_sft_compatibility(columns: List[str]) -> Dict[str, Any]:
-    """Check SFT compatibility"""
-    has_messages = "messages" in columns
-    has_text = "text" in columns
-    has_prompt_completion = "prompt" in columns and "completion" in columns
-
-    ready = has_messages or has_text or has_prompt_completion
-
-    possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
-    possible_response = find_columns(columns, ["response", "completion", "output", "answer"])
-
-    return {
-        "ready": ready,
-        "reason": "messages" if has_messages else "text" if has_text else "prompt+completion" if has_prompt_completion else None,
-        "possible_prompt": possible_prompt[0] if possible_prompt else None,
-        "possible_response": possible_response[0] if possible_response else None,
-        "has_context": "context" in columns,
-    }
-
-
-def check_dpo_compatibility(columns: List[str]) -> Dict[str, Any]:
-    """Check DPO compatibility"""
-    has_standard = "prompt" in columns and "chosen" in columns and "rejected" in columns
-
-    possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
-    possible_chosen = find_columns(columns, ["chosen", "preferred", "winner"])
-    possible_rejected = find_columns(columns, ["rejected", "dispreferred", "loser"])
-
-    can_map = bool(possible_prompt and possible_chosen and possible_rejected)
-
-    return {
-        "ready": has_standard,
-        "can_map": can_map,
-        "prompt_col": possible_prompt[0] if possible_prompt else None,
-        "chosen_col": possible_chosen[0] if possible_chosen else None,
-        "rejected_col": possible_rejected[0] if possible_rejected else None,
-    }
-
-
-def check_grpo_compatibility(columns: List[str]) -> Dict[str, Any]:
-    """Check GRPO compatibility"""
-    has_prompt = "prompt" in columns
-    has_no_responses = "chosen" not in columns and "rejected" not in columns
-
-    possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
-
-    return {
-        "ready": has_prompt and has_no_responses,
-        "can_map": bool(possible_prompt) and has_no_responses,
-        "prompt_col": possible_prompt[0] if possible_prompt else None,
-    }
-
-
-def check_kto_compatibility(columns: List[str]) -> Dict[str, Any]:
-    """Check KTO compatibility"""
-    return {"ready": "prompt" in columns and "completion" in columns and "label" in columns}
-
-
-def generate_mapping_code(method: str, info: Dict[str, Any]) -> str:
-    """Generate mapping code for a training method"""
-    if method == "SFT":
-        if info["ready"]:
-            return None
-
-        prompt_col = info.get("possible_prompt")
-        response_col = info.get("possible_response")
-        has_context = info.get("has_context", False)
-
-        if not prompt_col:
-            return None
-
-        if has_context and response_col:
-            return f"""def format_for_sft(example):
-    text = f"Instruction: {{example['{prompt_col}']}}\\n\\n"
-    if example.get('context'):
-        text += f"Context: {{example['context']}}\\n\\n"
-    text += f"Response: {{example['{response_col}']}}"
-    return {{'text': text}}
-
-dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
-        elif response_col:
-            return f"""def format_for_sft(example):
-    return {{'text': f"{{example['{prompt_col}']}}\\n\\n{{example['{response_col}']}}"}}
-
-dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
-        else:
-            return f"""def format_for_sft(example):
-    return {{'text': example['{prompt_col}']}}
-
-dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
-
-    elif method == "DPO":
-        if info["ready"] or not info["can_map"]:
-            return None
-
-        return f"""def format_for_dpo(example):
-    return {{
-        'prompt': example['{info['prompt_col']}'],
-        'chosen': example['{info['chosen_col']}'],
-        'rejected': example['{info['rejected_col']}'],
-    }}
-
-dataset = dataset.map(format_for_dpo, remove_columns=dataset.column_names)"""
-
-    elif method == "GRPO":
-        if info["ready"] or not info["can_map"]:
-            return None
-
-        return f"""def format_for_grpo(example):
-    return {{'prompt': example['{info['prompt_col']}']}}
-
-dataset = dataset.map(format_for_grpo, remove_columns=dataset.column_names)"""
-
-    return None
-
-
-def format_value_preview(value: Any, max_chars: int) -> str:
-    """Format value for preview"""
-    if value is None:
-        return "None"
-    elif isinstance(value, str):
-        return value[:max_chars] + ("..." if len(value) > max_chars else "")
-    elif isinstance(value, list):
-        if len(value) > 0 and isinstance(value[0], dict):
-            return f"[{len(value)} items] Keys: {list(value[0].keys())}"
-        preview = str(value)
-        return preview[:max_chars] + ("..." if len(preview) > max_chars else "")
-    else:
-        preview = str(value)
-        return preview[:max_chars] + ("..." if len(preview) > max_chars else "")
-
-
-def main():
-    args = parse_args()
-
-    print(f"Fetching dataset info via Datasets Server API...")
-
-    try:
-        # Get splits info
-        splits_data = get_splits(args.dataset)
-        if not splits_data or "splits" not in splits_data:
-            print(f"ERROR: Could not fetch splits for dataset '{args.dataset}'")
-            print(f"  Dataset may not exist or is not accessible via Datasets Server API")
-            sys.exit(1)
-
-        # Find the right config
-        available_configs = set()
-        split_found = False
-        config_to_use = args.config
-
-        for split_info in splits_data["splits"]:
-            available_configs.add(split_info["config"])
-            if split_info["config"] == args.config and split_info["split"] == args.split:
-                split_found = True
-
-        # If default config not found, try first available
-        if not split_found and available_configs:
-            config_to_use = list(available_configs)[0]
-            print(f"Config '{args.config}' not found, trying '{config_to_use}'...")
-
-        # Get rows
-        rows_data = get_rows(args.dataset, config_to_use, args.split, offset=0, length=args.samples)
-
-        if not rows_data or "rows" not in rows_data:
-            print(f"ERROR: Could not fetch rows for dataset '{args.dataset}'")
-            print(f"  Split '{args.split}' may not exist")
-            print(f"  Available configs: {', '.join(sorted(available_configs))}")
-            sys.exit(1)
-
-        rows = rows_data["rows"]
-        if not rows:
-            print(f"ERROR: No rows found in split '{args.split}'")
-            sys.exit(1)
-
-        # Extract column info from first row
-        first_row = rows[0]["row"]
-        columns = list(first_row.keys())
-        features = rows_data.get("features", [])
-
-        # Get total count if available
-        total_examples = "Unknown"
-        for split_info in splits_data["splits"]:
-            if split_info["config"] == config_to_use and split_info["split"] == args.split:
-                total_examples = f"{split_info.get('num_examples', 'Unknown'):,}" if isinstance(split_info.get('num_examples'), int) else "Unknown"
-                break
-
-    except Exception as e:
-        print(f"ERROR: {str(e)}")
-        sys.exit(1)
-
-    # Run compatibility checks
-    sft_info = check_sft_compatibility(columns)
-    dpo_info = check_dpo_compatibility(columns)
-    grpo_info = check_grpo_compatibility(columns)
-    kto_info = check_kto_compatibility(columns)
-
-    # Determine recommended methods
-    recommended = []
-    if sft_info["ready"]:
-        recommended.append("SFT")
-    elif sft_info["possible_prompt"]:
-        recommended.append("SFT (needs mapping)")
-
-    if dpo_info["ready"]:
-        recommended.append("DPO")
-    elif dpo_info["can_map"]:
-        recommended.append("DPO (needs mapping)")
-
-    if grpo_info["ready"]:
-        recommended.append("GRPO")
-    elif grpo_info["can_map"]:
-        recommended.append("GRPO (needs mapping)")
-
-    if kto_info["ready"]:
-        recommended.append("KTO")
-
-    # JSON output mode
-    if args.json_output:
-        result = {
-            "dataset": args.dataset,
-            "config": config_to_use,
-            "split": args.split,
-            "total_examples": total_examples,
-            "columns": columns,
-            "features": [{"name": f["name"], "type": f["type"]} for f in features] if features else [],
-            "compatibility": {
-                "SFT": sft_info,
-                "DPO": dpo_info,
-                "GRPO": grpo_info,
-                "KTO": kto_info,
-            },
-            "recommended_methods": recommended,
-        }
-        print(json.dumps(result, indent=2))
-        sys.exit(0)
-
-    # Human-readable output optimized for LLM parsing
-    print("=" * 80)
-    print(f"DATASET INSPECTION RESULTS")
-    print("=" * 80)
-
-    print(f"\nDataset: {args.dataset}")
-    print(f"Config: {config_to_use}")
-    print(f"Split: {args.split}")
-    print(f"Total examples: {total_examples}")
-    print(f"Samples fetched: {len(rows)}")
-
-    print(f"\n{'COLUMNS':-<80}")
-    if features:
-        for feature in features:
-            print(f"  {feature['name']}: {feature['type']}")
-    else:
-        for col in columns:
-            print(f"  {col}: (type info not available)")
-
-    print(f"\n{'EXAMPLE DATA':-<80}")
-    example = first_row
-    for col in columns:
-        value = example.get(col)
-        display = format_value_preview(value, args.preview)
-        print(f"\n{col}:")
-        print(f"  {display}")
-
-    print(f"\n{'TRAINING METHOD COMPATIBILITY':-<80}")
-
-    # SFT
-    print(f"\n[SFT] {'✅ READY' if sft_info['ready'] else '⚠️ NEEDS MAPPING'}")
-    if sft_info["ready"]:
-        print(f"  Reason: Dataset has '{sft_info['reason']}' field")
-        print(f"  Action: Use directly with SFTTrainer")
-    elif sft_info["possible_prompt"]:
-        print(f"  Detected: prompt='{sft_info['possible_prompt']}' response='{sft_info['possible_response']}'")
-        print(f"  Action: Apply mapping code (see below)")
-    else:
-        print(f"  Status: Cannot determine mapping - manual inspection needed")
-
-    # DPO
-    print(f"\n[DPO] {'✅ READY' if dpo_info['ready'] else '⚠️ NEEDS MAPPING' if dpo_info['can_map'] else '❌ INCOMPATIBLE'}")
-    if dpo_info["ready"]:
-        print(f"  Reason: Dataset has 'prompt', 'chosen', 'rejected' fields")
-        print(f"  Action: Use directly with DPOTrainer")
-    elif dpo_info["can_map"]:
-        print(f"  Detected: prompt='{dpo_info['prompt_col']}' chosen='{dpo_info['chosen_col']}' rejected='{dpo_info['rejected_col']}'")
-        print(f"  Action: Apply mapping code (see below)")
-    else:
-        print(f"  Status: Missing required fields (prompt + chosen + rejected)")
-
-    # GRPO
-    print(f"\n[GRPO] {'✅ READY' if grpo_info['ready'] else '⚠️ NEEDS MAPPING' if grpo_info['can_map'] else '❌ INCOMPATIBLE'}")
-    if grpo_info["ready"]:
-        print(f"  Reason: Dataset has 'prompt' field")
-        print(f"  Action: Use directly with GRPOTrainer")
-    elif grpo_info["can_map"]:
-        print(f"  Detected: prompt='{grpo_info['prompt_col']}'")
-        print(f"  Action: Apply mapping code (see below)")
-    else:
-        print(f"  Status: Missing prompt field")
-
-    # KTO
-    print(f"\n[KTO] {'✅ READY' if kto_info['ready'] else '❌ INCOMPATIBLE'}")
-    if kto_info["ready"]:
-        print(f"  Reason: Dataset has 'prompt', 'completion', 'label' fields")
-        print(f"  Action: Use directly with KTOTrainer")
-    else:
-        print(f"  Status: Missing required fields (prompt + completion + label)")
-
-    # Mapping code
-    print(f"\n{'MAPPING CODE (if needed)':-<80}")
-
-    mapping_needed = False
-
-    sft_mapping = generate_mapping_code("SFT", sft_info)
-    if sft_mapping:
-        print(f"\n# For SFT Training:")
-        print(sft_mapping)
-        mapping_needed = True
-
-    dpo_mapping = generate_mapping_code("DPO", dpo_info)
-    if dpo_mapping:
-        print(f"\n# For DPO Training:")
-        print(dpo_mapping)
-        mapping_needed = True
-
-    grpo_mapping = generate_mapping_code("GRPO", grpo_info)
-    if grpo_mapping:
-        print(f"\n# For GRPO Training:")
-        print(grpo_mapping)
-        mapping_needed = True
-
-    if not mapping_needed:
-        print("\nNo mapping needed - dataset is ready for training!")
-
-    print(f"\n{'SUMMARY':-<80}")
-    print(f"Recommended training methods: {', '.join(recommended) if recommended else 'None (dataset needs formatting)'}")
-    print(f"\nNote: Used Datasets Server API (instant, no download required)")
-
-    print("\n" + "=" * 80)
-    sys.exit(0)
-
-
-if __name__ == "__main__":
-    try:
-        main()
-    except KeyboardInterrupt:
-        sys.exit(0)
-    except Exception as e:
-        print(f"ERROR: {e}", file=sys.stderr)
-        sys.exit(1)
trl/SKILL.md CHANGED
@@ -1,6 +1,6 @@
 ---
 name: trl
-description: This skill should be used when users want to train or fine-tune language models using TRL (Transformer Reinforcement Learning) on Hugging Face Jobs infrastructure. Covers SFT, DPO, GRPO
+description: This skill should be used when users want to train or fine-tune language models using TRL (Transformer Reinforcement Learning) on Hugging Face Jobs infrastructure. Covers SFT, DPO, GRPO, KTO, reward modeling, and PPO training methods, plus GGUF conversion for local deployment. Includes guidance on the TRL Jobs package, UV scripts with PEP 723 format, dataset preparation and validation, hardware selection, cost estimation, Trackio monitoring, Hub authentication, and model persistence. Should be invoked for tasks involving cloud GPU training, GGUF conversion, or when users mention training on Hugging Face Jobs without local GPU setup.
 license: Complete terms in LICENSE.txt
 ---
 
@@ -14,7 +14,9 @@ Train language models using TRL (Transformer Reinforcement Learning) on fully ma
 - **SFT** (Supervised Fine-Tuning) - Standard instruction tuning
 - **DPO** (Direct Preference Optimization) - Alignment from preference data
 - **GRPO** (Group Relative Policy Optimization) - Online RL training
+- **KTO** (Kahneman-Tversky Optimization) - Preference tuning without paired data
 - **Reward Modeling** - Train reward models for RLHF
+- **PPO** (Proximal Policy Optimization) - Classic RLHF method
 
 **For detailed TRL method documentation:**
 ```python
@@ -30,7 +32,7 @@ hf_doc_fetch("https://huggingface.co/docs/trl/dpo_trainer")  # DPO
 
 Use this skill when users want to:
 - Fine-tune language models on cloud GPUs without local infrastructure
-- Train with TRL methods (SFT, DPO, GRPO, etc.)
+- Train with TRL methods (SFT, DPO, GRPO, KTO, etc.)
 - Run training jobs on Hugging Face Jobs infrastructure
 - Convert trained models to GGUF for local deployment (Ollama, LM Studio, llama.cpp)
 - Ensure trained models are permanently saved to the Hub
@@ -40,7 +42,7 @@ Use this skill when users want to:
 
 When assisting with training jobs:
 
-1. **
+1. **Submit jobs directly with inline scripts** - The `script` parameter accepts Python code directly. Do NOT save to local files unless the user explicitly requests it. Pass the script content as a string to `hf_jobs()`. If user asks to "train a model", "fine-tune", or similar requests, you MUST create the training script AND submit the job immediately.
 
 2. **Always include Trackio** - Every training script should include Trackio for real-time monitoring. Use example scripts in `scripts/` as templates.
 
@@ -50,7 +52,7 @@ When assisting with training jobs:
 
 ## Local Script Dependencies
 
-To run scripts locally (like `estimate_cost.py`), install dependencies:
+To run scripts locally (like `validate_dataset.py`, `estimate_cost.py`), install dependencies:
 ```bash
 pip install -r requirements.txt
 ```
@@ -61,14 +63,14 @@ Before starting any training job, verify:
 
 ### ✅ **Account & Authentication**
 - Hugging Face Account with [Pro](https://hf.co/pro), [Team](https://hf.co/enterprise), or [Enterprise](https://hf.co/enterprise) plan (Jobs require paid plan)
-- Authenticated login: Check with `
+- Authenticated login: Check with `mcp__huggingface__hf_whoami()`
 - **HF_TOKEN for Hub Push** ⚠️ CRITICAL - Training environment is ephemeral, must push to Hub or ALL training results are lost
 - Token must have write permissions and is automatically available as `$HF_TOKEN` in job secrets
 
 ### ✅ **Dataset Requirements**
 - Dataset must exist on Hub or be loadable via `datasets.load_dataset()`
 - Format must match training method (SFT: "messages"/text/prompt-completion; DPO: chosen/rejected; GRPO: prompt-only)
--
+- Use `scripts/validate_dataset.py` to verify format or `hf_doc_fetch("https://huggingface.co/docs/trl/dataset_formats")` for complete reference
 - Size appropriate for hardware (Demo: 50-100 examples on t4-small; Production: 1K-10K+ on a10g-large/a100-large)
 
 ### ⚠️ **Critical Settings**
@@ -116,9 +118,27 @@ The job is running in the background. Ask me to check status/logs when ready!
 
 ## Quick Start: Three Approaches
 
-### Approach 1:
-
+### Approach 1: TRL Jobs Package (Easiest - Recommended for Beginners)
 
+The `trl-jobs` package provides optimized defaults and one-liner training:
+
+```bash
+# Install (users only, not needed for this environment)
+pip install trl-jobs
+
+# Train with SFT (simplest possible)
+trl-jobs sft \
+  --model_name Qwen/Qwen2.5-0.5B \
+  --dataset_name trl-lib/Capybara
+```
+
+**Benefits:** Pre-configured settings, automatic Trackio integration, automatic Hub push, one-line commands
+**When to use:** User is new to training, standard scenarios, quick experimentation
+**Repository:** https://github.com/huggingface/trl-jobs
+
+### Approach 2: UV Scripts (Recommended for Custom Training)
+
+UV scripts use PEP 723 inline dependencies for clean, self-contained training. **Submit script content directly inline:**
 
 ```python
 hf_jobs("uv", {
@@ -165,56 +185,17 @@ trackio.finish()
 })
 ```
 
-**Benefits:**
-**When to use:**
-
-#### Working with Scripts
-
-⚠️ **Important:** The `script` parameter accepts either inline code (as shown above) OR a URL. **Local file paths do NOT work.**
-
-**Why local paths don't work:**
-Jobs run in isolated Docker containers without access to your local filesystem. Scripts must be:
-- Inline code (recommended for custom training)
-- Publicly accessible URLs
-- Private repo URLs (with HF_TOKEN)
-
-**Common mistakes:**
-```python
-# ❌ These will all fail
-hf_jobs("uv", {"script": "train.py"})
-hf_jobs("uv", {"script": "./scripts/train.py"})
-hf_jobs("uv", {"script": "/path/to/train.py"})
-```
-
-**Correct approaches:**
-```python
-# ✅ Inline code (recommended)
-hf_jobs("uv", {"script": "# /// script\n# dependencies = [...]\n# ///\n\n<your code>"})
-
-# ✅ From Hugging Face Hub
-hf_jobs("uv", {"script": "https://huggingface.co/user/repo/resolve/main/train.py"})
-
-# ✅ From GitHub
-hf_jobs("uv", {"script": "https://raw.githubusercontent.com/user/repo/main/train.py"})
-
-# ✅ From Gist
-hf_jobs("uv", {"script": "https://gist.githubusercontent.com/user/id/raw/train.py"})
-```
-
-**To use local scripts:** Upload to HF Hub first:
-```bash
-huggingface-cli repo create my-training-scripts --type model
-huggingface-cli upload my-training-scripts ./train.py train.py
-# Use: https://huggingface.co/USERNAME/my-training-scripts/resolve/main/train.py
-```
+**Benefits:** Clean code, dependencies declared inline (PEP 723), no file saving required
+**When to use:** Custom training logic, full control over training
+**See:** `references/uv_scripts_guide.md` for complete UV scripts guide
 
-### Approach
+### Approach 3: TRL Maintained Scripts (Run Official Examples)
 
 TRL provides battle-tested scripts for all methods. Can be run from URLs:
 
 ```python
 hf_jobs("uv", {
-    "script": "https://
+    "script": "https://raw.githubusercontent.com/huggingface/trl/main/examples/scripts/sft.py",
     "script_args": [
         "--model_name_or_path", "Qwen/Qwen2.5-0.5B",
         "--dataset_name", "trl-lib/Capybara",
@@ -230,7 +211,7 @@ hf_jobs("uv", {
 
 **Benefits:** No code to write, maintained by TRL team, production-tested
 **When to use:** Standard TRL training, quick experiments, don't need custom code
-**Available:**
+**Available:** sft.py, dpo.py, grpo.py, kto.py, reward.py, ppo.py - https://github.com/huggingface/trl/tree/main/examples/scripts
 
 ### Finding More UV Scripts on Hub
 
@@ -246,26 +227,6 @@ hub_repo_details(["uv-scripts/classification"], repo_type="dataset", include_rea
 
 **Popular collections:** ocr, classification, synthetic-data, vllm, dataset-creation
 
-### Approach 3: TRL Jobs Package (For Terminal Use)
-
-The `trl-jobs` package provides optimized defaults and one-liner training. **Note: This approach uses bash commands, not `hf_jobs()` MCP tool.**
-
-```bash
-# Install (users only, not needed for this environment)
-pip install trl-jobs
-
-# Train with SFT (simplest possible)
-trl-jobs sft \
-  --model_name Qwen/Qwen2.5-0.5B \
-  --dataset_name trl-lib/Capybara
-```
-
-**Benefits:** Pre-configured settings, automatic Trackio integration, automatic Hub push, one-line commands
-**When to use:** User working in terminal directly (not Claude Code context), quick local experimentation
-**Repository:** https://github.com/huggingface/trl-jobs
-
-⚠️ **In Claude Code context, use Approach 1 (UV Scripts) with `hf_jobs()` instead.**
-
 ## Hardware Selection
 
 | Model Size | Recommended Hardware | Cost (approx/hr) | Use Case |
@@ -398,85 +359,6 @@ hf_jobs("logs", {"job_id": "your-job-id"})
 
 **Remember:** Wait for user to request status checks. Avoid polling repeatedly.
 
-## Dataset Validation
-
-**Validate dataset format BEFORE launching GPU training to prevent the #1 cause of training failures: format mismatches.**
-
-### Why Validate
-
-- 50%+ of training failures are due to dataset format issues
-- DPO especially strict: requires exact column names (`prompt`, `chosen`, `rejected`)
-- Failed GPU jobs waste $1-10 and 30-60 minutes
-- Validation on CPU costs ~$0.01 and takes <1 minute
-
-### When to Validate
-
-**ALWAYS validate for:**
-- Unknown or custom datasets
-- DPO training (CRITICAL - 90% of datasets need mapping)
-- Any dataset not explicitly TRL-compatible
-
-**Skip validation for known TRL datasets:**
-- `trl-lib/ultrachat_200k`, `trl-lib/Capybara`, `HuggingFaceH4/ultrachat_200k`, etc.
-
-### Usage
-
-```python
-hf_jobs("uv", {
-    "script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
-    "script_args": ["--dataset", "username/dataset-name", "--split", "train"]
-})
-```
-
-The script is fast, and will usually complete synchronously.
-
-### Reading Results
-
-The output shows compatibility for each training method:
-
-- **`✅ READY`** - Dataset is compatible, use directly
-- **`⚠️ NEEDS MAPPING`** - Compatible but needs preprocessing (mapping code provided)
-- **`❌ INCOMPATIBLE`** - Cannot be used for this method
-
-When mapping is needed, the output includes a **"MAPPING CODE"** section with copy-paste ready Python code.
-
-### Example Workflow
-
-```python
-# 1. Inspect dataset (costs ~$0.01, <1 min on CPU)
-hf_jobs("uv", {
-    "script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
-    "script_args": ["--dataset", "argilla/distilabel-math-preference-dpo", "--split", "train"]
-})
-
-# 2. Check output markers:
-#    ✅ READY → proceed with training
-#    ⚠️ NEEDS MAPPING → apply mapping code below
-#    ❌ INCOMPATIBLE → choose different method/dataset
-
-# 3. If mapping needed, apply before training:
-def format_for_dpo(example):
-    return {
-        'prompt': example['instruction'],
-        'chosen': example['chosen_response'],
-        'rejected': example['rejected_response'],
-    }
-dataset = dataset.map(format_for_dpo, remove_columns=dataset.column_names)
-
-# 4. Launch training job with confidence
-```
-
-### Common Scenario: DPO Format Mismatch
-
-Most DPO datasets use non-standard column names. Example:
-
-```
-Dataset has: instruction, chosen_response, rejected_response
-DPO expects: prompt, chosen, rejected
-```
-
-The validator detects this and provides exact mapping code to fix it.
-
 ## Converting Models to GGUF
 
 After training, convert models to **GGUF format** for use with llama.cpp, Ollama, LM Studio, and other local inference tools.
@@ -531,13 +413,15 @@ See `references/training_patterns.md` for detailed examples including:
 ### Dataset Misformatted
 
 **Fix:**
-1. Validate first
-
-
-
+1. Validate first: `python scripts/validate_dataset.py --dataset name --method sft`
+2. Check required columns:
+   - SFT: `messages` OR `text` OR `prompt`+`completion`
+   - DPO: `prompt`, `chosen`, `rejected`
+   - GRPO: `prompt` only
+3. Apply formatting if needed:
+```python
+dataset = dataset.map(lambda x: {"text": f"User: {x['input']}\nBot: {x['output']}"})
 ```
-2. Check output for compatibility markers (✅ READY, ⚠️ NEEDS MAPPING, ❌ INCOMPATIBLE)
-3. Apply mapping code from inspector output if needed
 
 ### Job Timeout
 
@@ -573,7 +457,7 @@ Add to PEP 723 header:
 - Job times out → Increase timeout, reduce epochs/dataset, use smaller model/LoRA
 - Model not saved to Hub → Check push_to_hub=True, hub_model_id, secrets=HF_TOKEN
 - Out of Memory (OOM) → Reduce batch size, increase gradient accumulation, enable LoRA, use larger GPU
-- Dataset format error →
+- Dataset format error → Check format docs, validate dataset with `scripts/validate_dataset.py`
 - Import/module errors → Add PEP 723 header with dependencies, verify format
 - Authentication errors → Check `mcp__huggingface__hf_whoami()`, token permissions, secrets parameter
 
@@ -586,6 +470,7 @@ Add to PEP 723 header:
 - `references/training_patterns.md` - Common training patterns and examples
 - `references/gguf_conversion.md` - Complete GGUF conversion guide
 - `references/trackio_guide.md` - Trackio monitoring setup
+- `references/uv_scripts_guide.md` - Complete UV scripts guide
 - `references/hardware_guide.md` - Hardware specs and selection
 - `references/hub_saving.md` - Hub authentication troubleshooting
 - `references/troubleshooting.md` - Common issues and solutions
@@ -594,12 +479,10 @@ Add to PEP 723 header:
 - `scripts/train_sft_example.py` - Production SFT template
 - `scripts/train_dpo_example.py` - Production DPO template
 - `scripts/train_grpo_example.py` - Production GRPO template
+- `scripts/validate_dataset.py` - Validate dataset format before training
 - `scripts/estimate_cost.py` - Estimate time and cost (offer when appropriate)
 - `scripts/convert_to_gguf.py` - Complete GGUF conversion script
 
-### External Scripts
-- [Dataset Inspector](https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py) - Validate dataset format before training (use via `uv run` or `hf_jobs`)
-
 ### External Links
 - [TRL Documentation](https://huggingface.co/docs/trl)
 - [TRL Jobs Training Guide](https://huggingface.co/docs/trl/en/jobs_training)
@@ -617,7 +500,7 @@ Add to PEP 723 header:
 4. **Always enable Hub push** - Environment is ephemeral; without push, all results lost
 5. **Include Trackio** - Use example scripts as templates for real-time monitoring
 6. **Offer cost estimation** - When parameters are known, use `scripts/estimate_cost.py`
-7. **
-8. **Use
-9. **Validate dataset format** before training with
+7. **Three approaches available:** TRL Jobs package (easiest), UV scripts (custom, modern), TRL maintained scripts (official examples)
+8. **Use doc-fetch/doc-search** for latest TRL documentation
+9. **Validate dataset format** before training with `scripts/validate_dataset.py`
 10. **Choose appropriate hardware** for model size; use LoRA for models >7B
trl/references/gguf_conversion.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
| 2 |
|
| 3 |
After training models with TRL on Hugging Face Jobs, convert them to **GGUF format** for use with llama.cpp, Ollama, LM Studio, and other local inference tools.
|
| 4 |
|
| 5 |
-
**This guide provides production-ready, tested code
|
| 6 |
|
| 7 |
## What is GGUF?
|
| 8 |
|
|
@@ -21,119 +21,147 @@ After training models with TRL on Hugging Face Jobs, convert them to **GGUF form
|
|
| 21 |
- Deploying to edge devices
|
| 22 |
- Sharing models for local-first use
|
| 23 |
|
| 24 |
-
##
|
| 25 |
-
|
| 26 |
-
Based on production testing, these are **essential** for reliable conversion:
|
| 27 |
-
|
| 28 |
-
### 1. β
Install Build Tools FIRST
|
| 29 |
-
**Before cloning llama.cpp**, install build dependencies:
|
| 30 |
-
```python
|
| 31 |
-
subprocess.run(["apt-get", "update", "-qq"], check=True, capture_output=True)
|
| 32 |
-
subprocess.run(["apt-get", "install", "-y", "-qq", "build-essential", "cmake"], check=True, capture_output=True)
|
| 33 |
-
```
|
| 34 |
|
| 35 |
-
**
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
|
| 38 |
-
**Build the quantize tool with CMake:**
|
| 39 |
-
```python
|
| 40 |
-
# Create build directory
|
| 41 |
-
os.makedirs("/tmp/llama.cpp/build", exist_ok=True)
|
| 42 |
|
| 43 |
-
|
| 44 |
-
subprocess.run([
|
| 45 |
-
"cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp",
|
| 46 |
-
"-DGGML_CUDA=OFF" # Faster build, CUDA not needed for quantization
|
| 47 |
-
], check=True, capture_output=True, text=True)
|
| 48 |
|
| 49 |
-
|
| 50 |
-
subprocess.run([
|
| 51 |
-
"cmake", "--build", "/tmp/llama.cpp/build",
|
| 52 |
-
"--target", "llama-quantize", "-j", "4"
|
| 53 |
-
], check=True, capture_output=True, text=True)
|
| 54 |
|
| 55 |
-
# Binary path
|
| 56 |
-
quantize_bin = "/tmp/llama.cpp/build/bin/llama-quantize"
|
| 57 |
-
```
|
| 58 |
-
|
| 59 |
-
**Why:** CMake is more reliable than `make` and produces consistent binary paths.
|
| 60 |
-
|
| 61 |
-
### 3. β
Include All Dependencies
|
| 62 |
-
**PEP 723 header must include:**
|
| 63 |
```python
|
|
|
|
|
|
|
| 64 |
# /// script
|
| 65 |
# dependencies = [
|
| 66 |
# "transformers>=4.36.0",
|
| 67 |
# "peft>=0.7.0",
|
| 68 |
# "torch>=2.0.0",
|
| 69 |
-
# "accelerate>=0.24.0",
|
| 70 |
# "huggingface_hub>=0.20.0",
|
| 71 |
-
# "sentencepiece>=0.1.99",
|
| 72 |
-
# "protobuf>=3.20.0",
|
| 73 |
# "numpy",
|
| 74 |
# "gguf",
|
| 75 |
# ]
|
| 76 |
# ///
|
| 77 |
-
```
|
| 78 |
-
|
| 79 |
-
**Why:** `sentencepiece` and `protobuf` are critical for tokenizer conversion. Missing them causes silent failures.
|
| 80 |
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
- β
Comprehensive error handling
|
| 100 |
-
- β
Environment variable configuration
|
| 101 |
-
- β
Automatic README generation
|
| 102 |
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
"flavor": "a10g-large",
|
| 114 |
"timeout": "45m",
|
| 115 |
"secrets": {"HF_TOKEN": "$HF_TOKEN"},
|
| 116 |
"env": {
|
| 117 |
"ADAPTER_MODEL": "username/my-finetuned-model",
|
| 118 |
"BASE_MODEL": "Qwen/Qwen2.5-0.5B",
|
| 119 |
-
"OUTPUT_REPO": "username/my-model-gguf"
|
| 120 |
-
"HF_USERNAME": "username" # Optional, for README
|
| 121 |
}
|
| 122 |
})
|
| 123 |
```
|
| 124 |
|
| 125 |
-
## Conversion Process
|
| 126 |
-
|
| 127 |
-
The script performs these steps:
|
| 128 |
-
|
| 129 |
-
1. **Load and Merge** - Load base model and LoRA adapter, merge them
|
| 130 |
-
2. **Install Build Tools** - Install gcc, cmake (CRITICAL: before cloning llama.cpp)
|
| 131 |
-
3. **Setup llama.cpp** - Clone repo, install Python dependencies
|
| 132 |
-
4. **Convert to GGUF** - Create FP16 GGUF using llama.cpp converter
|
| 133 |
-
5. **Build Quantize Tool** - Use CMake to build `llama-quantize`
|
| 134 |
-
6. **Quantize** - Create Q4_K_M, Q5_K_M, Q8_0 versions
|
| 135 |
-
7. **Upload** - Upload all versions + README to Hub
|
| 136 |
-
|
| 137 |
## Quantization Options
|
| 138 |
|
| 139 |
Common quantization formats (from smallest to largest):
|
|
@@ -163,7 +191,7 @@ Common quantization formats (from smallest to largest):
|
|
| 163 |
|
| 164 |
**GGUF models work on both CPU and GPU.** They're optimized for CPU inference but can also leverage GPU acceleration when available.
|
| 165 |
|
| 166 |
-
|
| 167 |
```bash
|
| 168 |
# Download GGUF
|
| 169 |
huggingface-cli download username/my-model-gguf model-q4_k_m.gguf
|
|
@@ -176,7 +204,7 @@ ollama create my-model -f Modelfile
|
|
| 176 |
ollama run my-model
|
| 177 |
```
|
| 178 |
|
| 179 |
-
|
| 180 |
```bash
|
| 181 |
# CPU only
|
| 182 |
./llama-cli -m model-q4_k_m.gguf -p "Your prompt"
|
|
@@ -185,112 +213,45 @@ ollama run my-model
|
|
| 185 |
./llama-cli -m model-q4_k_m.gguf -ngl 32 -p "Your prompt"
|
| 186 |
```
|
| 187 |
|
| 188 |
-
|
| 189 |
1. Download the `.gguf` file
|
| 190 |
2. Import into LM Studio
|
| 191 |
3. Start chatting
|
| 192 |
|
| 193 |
## Best Practices
|
| 194 |
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
5. **Create multiple quantizations** - Give users choice
|
| 201 |
-
6. **Test on known models** before production use
|
| 202 |
-
7. **Use A10G GPU** for faster conversion
|
| 203 |
-
|
| 204 |
-
### β DON'T:
|
| 205 |
-
1. **Assume repos exist** - Always verify with hub tools
|
| 206 |
-
2. **Use make** instead of CMake - Less reliable
|
| 207 |
-
3. **Remove dependencies** to "simplify" - They're all needed
|
| 208 |
-
4. **Skip build tools** - Quantization will fail silently
|
| 209 |
-
5. **Use default paths** - CMake puts binaries in build/bin/
|
| 210 |
|
| 211 |
## Common Issues
|
| 212 |
|
| 213 |
-
|
| 214 |
-
**Fix:**
|
| 215 |
- Use larger GPU (a10g-large or a100-large)
|
| 216 |
-
-
|
| 217 |
-
- Use `dtype=torch.float16` or `torch.bfloat16`
|
| 218 |
|
| 219 |
-
|
| 220 |
-
**Fix:**
|
| 221 |
- Ensure llama.cpp supports the model architecture
|
| 222 |
-
- Check
|
| 223 |
-
-
|
| 224 |
-
- Check llama.cpp
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
-
|
| 229 |
-
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
-
|
| 242 |
-
- Check network/Hub status
|
| 243 |
-
|
| 244 |
- ## Lessons Learned
-
- These are from production testing and real failures:
-
- ### 1. Always Verify Before Use
- **Lesson:** Don't assume repos/datasets exist. Check first.
- ```python
- # BEFORE submitting job
- hub_repo_details(["trl-lib/argilla-dpo-mix-7k"], repo_type="dataset")  # Would catch error
- ```
- **Prevented failures:** Non-existent dataset names, typos in model names
-
- ### 2. Prioritize Reliability Over Performance
- **Lesson:** Default to what's most likely to succeed.
- - Use CMake (not make) - more reliable
- - Disable CUDA in build - faster, not needed
- - Include all dependencies - don't "simplify"
-
- **Prevented failures:** Build failures, missing binaries
-
- ### 3. Create Atomic, Self-Contained Scripts
- **Lesson:** Don't remove dependencies or steps. Scripts should work as a unit.
- - All dependencies in PEP 723 header
- - All build steps included
- - Clear error messages
-
- **Prevented failures:** Missing tokenizer libraries, build tool failures
-
- ## References
-
- **In this skill:**
- - `scripts/convert_to_gguf.py` - Complete, production-ready script
-
- **External:**
- - [llama.cpp Repository](https://github.com/ggerganov/llama.cpp)
- - [GGUF Specification](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md)
- - [Ollama Documentation](https://ollama.ai)
- - [LM Studio](https://lmstudio.ai)
-
- ## Summary
-
- **Critical checklist for GGUF conversion:**
- - [ ] Verify adapter and base models exist on Hub
- - [ ] Use production script from `scripts/convert_to_gguf.py`
- - [ ] All dependencies in PEP 723 header (including sentencepiece, protobuf)
- - [ ] Build tools installed before cloning llama.cpp
- - [ ] CMake used for building quantize tool (not make)
- - [ ] Correct binary path: `/tmp/llama.cpp/build/bin/llama-quantize`
- - [ ] A10G GPU selected for reasonable conversion time
- - [ ] Timeout set to 45m minimum
- - [ ] HF_TOKEN in secrets for Hub upload
-
- **The script in `scripts/convert_to_gguf.py` incorporates all these lessons and has been tested successfully in production.**

After training models with TRL on Hugging Face Jobs, convert them to **GGUF format** for use with llama.cpp, Ollama, LM Studio, and other local inference tools.

+ **This guide provides production-ready, tested code.** All required dependencies are included in the examples below. No additional troubleshooting should be needed when following the templates exactly.

## What is GGUF?

- Deploying to edge devices
- Sharing models for local-first use

+ ## Conversion Process

+ **The conversion requires:**
+ 1. **Merge LoRA adapter** with base model (if using PEFT)
+ 2. **Convert to GGUF** format using llama.cpp
+ 3. **Quantize** to different bit depths (optional but recommended)
+ 4. **Upload** GGUF files to Hub
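
Before submitting the job, it's worth confirming that the adapter and base repos actually exist — the verification habit this skill recommends elsewhere. A minimal sketch using the hub-inspection tool already used in this skill (repo names are placeholders):

```python
# Sketch: verify repos exist before submitting the conversion job
hub_repo_details(["username/my-finetuned-model"], repo_type="model")  # adapter
hub_repo_details(["Qwen/Qwen2.5-0.5B"], repo_type="model")            # base
```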

+ ## GGUF Conversion Script Template

+ See `scripts/convert_to_gguf.py` for a complete, production-ready conversion script.

+ **Quick conversion job:**

```python
hf_jobs("uv", {
    "script": """
# /// script
# dependencies = [
#     "transformers>=4.36.0",
#     "peft>=0.7.0",
#     "torch>=2.0.0",
#     "huggingface_hub>=0.20.0",
#     "sentencepiece>=0.1.99",
#     "protobuf>=3.20.0",
#     "numpy",
#     "gguf",
# ]
# ///

import os
import torch
import subprocess
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from huggingface_hub import HfApi

# Configuration from environment
ADAPTER_MODEL = os.environ.get("ADAPTER_MODEL", "username/my-model")
BASE_MODEL = os.environ.get("BASE_MODEL", "Qwen/Qwen2.5-0.5B")
OUTPUT_REPO = os.environ.get("OUTPUT_REPO", "username/my-model-gguf")

print("🚀 Converting to GGUF...")

# Step 1: Load and merge
print("Loading base model...")
base = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True
)

print("Loading adapter...")
model = PeftModel.from_pretrained(base, ADAPTER_MODEL)

print("Merging...")
merged = model.merge_and_unload()

# Save merged model
merged_dir = "/tmp/merged"
merged.save_pretrained(merged_dir, safe_serialization=True)
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)
tokenizer.save_pretrained(merged_dir)

# Step 2: Install build tools and clone llama.cpp
print("Setting up llama.cpp...")
subprocess.run(["apt-get", "update", "-qq"], check=True, capture_output=True)
subprocess.run(["apt-get", "install", "-y", "-qq", "build-essential", "cmake"], check=True, capture_output=True)

subprocess.run([
    "git", "clone",
    "https://github.com/ggerganov/llama.cpp.git",
    "/tmp/llama.cpp"
], check=True)

subprocess.run([
    "pip", "install", "-r",
    "/tmp/llama.cpp/requirements.txt"
], check=True)

# Convert to GGUF
print("Converting to GGUF...")
subprocess.run([
    "python", "/tmp/llama.cpp/convert_hf_to_gguf.py",
    merged_dir,
    "--outfile", "/tmp/model-f16.gguf",
    "--outtype", "f16"
], check=True)

# Step 3: Build quantization tool with CMake
print("Building quantization tool...")
os.makedirs("/tmp/llama.cpp/build", exist_ok=True)

subprocess.run([
    "cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp",
    "-DGGML_CUDA=OFF"
], check=True)

subprocess.run([
    "cmake", "--build", "/tmp/llama.cpp/build",
    "--target", "llama-quantize", "-j", "4"
], check=True)

quantize = "/tmp/llama.cpp/build/bin/llama-quantize"
quants = ["Q4_K_M", "Q5_K_M", "Q8_0"]

for q in quants:
    print(f"Creating {q} quantization...")
    subprocess.run([
        quantize,
        "/tmp/model-f16.gguf",
        f"/tmp/model-{q.lower()}.gguf",
        q
    ], check=True)

# Step 4: Upload
print("Uploading to Hub...")
api = HfApi()
api.create_repo(OUTPUT_REPO, repo_type="model", exist_ok=True)

for q in ["f16"] + [q.lower() for q in quants]:
    api.upload_file(
        path_or_fileobj=f"/tmp/model-{q}.gguf",
        path_in_repo=f"model-{q}.gguf",
        repo_id=OUTPUT_REPO
    )

print(f"✅ Done! Models at: https://huggingface.co/{OUTPUT_REPO}")
""",
    "flavor": "a10g-large",
    "timeout": "45m",
    "secrets": {"HF_TOKEN": "$HF_TOKEN"},
    "env": {
        "ADAPTER_MODEL": "username/my-finetuned-model",
        "BASE_MODEL": "Qwen/Qwen2.5-0.5B",
        "OUTPUT_REPO": "username/my-model-gguf"
    }
})
```
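
Once the job finishes, a quick check that every quantization actually landed in the output repo can save a debugging round-trip. A minimal sketch using `huggingface_hub` (the repo name is a placeholder):

```python
# Sketch: confirm all expected GGUF files are present in the output repo
from huggingface_hub import HfApi

files = HfApi().list_repo_files("username/my-model-gguf")
for q in ["f16", "q4_k_m", "q5_k_m", "q8_0"]:
    assert f"model-{q}.gguf" in files, f"missing model-{q}.gguf"
print("✅ All GGUF files present")
```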

## Quantization Options

Common quantization formats (from smallest to largest):

**GGUF models work on both CPU and GPU.** They're optimized for CPU inference but can also leverage GPU acceleration when available.

+ **With Ollama (auto-detects GPU):**
```bash
# Download GGUF
huggingface-cli download username/my-model-gguf model-q4_k_m.gguf

ollama create my-model -f Modelfile
ollama run my-model
```
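
The `ollama create` step expects a Modelfile, which isn't shown here. A minimal sketch that writes the simplest valid one (a single `FROM` line pointing at the downloaded quantization):

```python
# Sketch: write a minimal Ollama Modelfile for the downloaded GGUF
from pathlib import Path

Path("Modelfile").write_text("FROM ./model-q4_k_m.gguf\n")
# then run: ollama create my-model -f Modelfile
```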

+ **With llama.cpp:**
```bash
# CPU only
./llama-cli -m model-q4_k_m.gguf -p "Your prompt"

./llama-cli -m model-q4_k_m.gguf -ngl 32 -p "Your prompt"
```

+ **With LM Studio:**
1. Download the `.gguf` file
2. Import into LM Studio
3. Start chatting

## Best Practices

+ 1. **Always create multiple quantizations** - Give users choice of size/quality
+ 2. **Include README** - Document which quantization to use for what purpose
+ 3. **Test the GGUF** - Run a quick inference test before uploading (see the sketch after this list)
+ 4. **Use A10G GPU** - Much faster than CPU for loading/merging large models
+ 5. **Clean up temp files** - Conversion creates large intermediate files
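
For item 3, a quick local smoke test is enough to catch a broken file before sharing it. A minimal sketch, assuming the third-party `llama-cpp-python` package (not a dependency of this skill):

```python
# Sketch: load the quantized GGUF locally and generate a few tokens
from llama_cpp import Llama

llm = Llama(model_path="model-q4_k_m.gguf", n_ctx=2048)
out = llm("Q: What is GGUF? A:", max_tokens=32)
print(out["choices"][0]["text"])
```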

## Common Issues

+ **Out of memory during merge:**
- Use larger GPU (a10g-large or a100-large)
+ - Load with `device_map="auto"` for automatic device placement
+ - Use `dtype=torch.float16` or `torch.bfloat16` instead of float32
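
Put together, a memory-friendly load for the merge step looks like the sketch below (the same `dtype`/`device_map` settings the conversion script uses; the model name is a placeholder):

```python
# Sketch: memory-friendly model load for merging
import torch
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    dtype=torch.bfloat16,  # half the memory of float32
    device_map="auto",     # spread layers across available devices
)
```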

+ **Conversion fails with architecture error:**
- Ensure llama.cpp supports the model architecture
+ - Check that the model uses a standard architecture (Qwen, Llama, Mistral, etc.)
+ - Some newer models require the latest llama.cpp from the main branch
+ - Check llama.cpp issues/docs for model support

+ **GGUF file doesn't work with llama.cpp:**
+ - Verify llama.cpp version compatibility
+ - Download the latest llama.cpp: `git clone https://github.com/ggerganov/llama.cpp.git`
+ - Rebuild after updating (this skill recommends CMake over make): `cmake -B build && cmake --build build`

+ **Quantization fails:**
+ - Ensure the `llama-quantize` tool was built: `cmake --build build --target llama-quantize`
+ - Check that the FP16 GGUF was created successfully before quantizing
+ - Some quantization types require specific llama.cpp versions

+ **Upload fails or times out:**
+ - Large models (>2GB) may need a longer timeout
+ - Use `api.upload_file()` with `commit_message` for better tracking
+ - Consider uploading quantized versions separately
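
For the `commit_message` suggestion, a minimal sketch (repo and file names are placeholders):

```python
# Sketch: upload one quantization with a descriptive commit message
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="/tmp/model-q4_k_m.gguf",
    path_in_repo="model-q4_k_m.gguf",
    repo_id="username/my-model-gguf",
    commit_message="Add Q4_K_M quantization",
)
```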

+ **See:** `scripts/convert_to_gguf.py` for a complete, production-ready conversion script with all dependencies included.
trl/references/reliability_principles.md
DELETED

@@ -1,371 +0,0 @@

# Reliability Principles for Training Jobs

These principles are derived from real production failures and successful fixes. Following them prevents common failure modes and ensures reliable job execution.

## Principle 1: Always Verify Before Use

**Rule:** Never assume repos, datasets, or resources exist. Verify with tools first.

### What It Prevents

- **Non-existent datasets** - Jobs fail immediately when dataset doesn't exist
- **Typos in names** - Simple mistakes like "argilla-dpo-mix-7k" vs "ultrafeedback_binarized"
- **Incorrect paths** - Old or moved repos, renamed files
- **Missing dependencies** - Undocumented requirements

### How to Apply

**Before submitting ANY job:**

```python
# Verify dataset exists
dataset_search({"query": "dataset-name", "author": "author-name", "limit": 5})
hub_repo_details(["author/dataset-name"], repo_type="dataset")

# Verify model exists
hub_repo_details(["org/model-name"], repo_type="model")

# Check script/file paths (for URL-based scripts)
# Verify before using: https://github.com/user/repo/blob/main/script.py
```

**Examples that would have caught errors:**

```python
# ❌ WRONG: Assumed dataset exists
hf_jobs("uv", {
    "script": """...""",
    "env": {"DATASET": "trl-lib/argilla-dpo-mix-7k"}  # Doesn't exist!
})

# ✅ CORRECT: Verify first
dataset_search({"query": "argilla dpo", "author": "trl-lib"})
# Would show: "trl-lib/ultrafeedback_binarized" is the correct name

hub_repo_details(["trl-lib/ultrafeedback_binarized"], repo_type="dataset")
# Confirms it exists before using
```

### Implementation Checklist

- [ ] Check dataset exists before training
- [ ] Verify base model exists before fine-tuning
- [ ] Confirm adapter model exists before GGUF conversion
- [ ] Test script URLs are valid before submitting
- [ ] Validate file paths in repositories
- [ ] Check for recent updates/renames of resources

**Time cost:** 5-10 seconds
**Time saved:** Hours of failed job time + debugging

---

## Principle 2: Prioritize Reliability Over Performance

**Rule:** Default to what is most likely to succeed, not what is theoretically fastest.

### What It Prevents

- **Hardware incompatibilities** - Features that fail on certain GPUs
- **Unstable optimizations** - Speed-ups that cause crashes
- **Complex configurations** - More failure points
- **Build system issues** - Unreliable compilation methods

### How to Apply

**Choose reliability:**

```python
# ❌ RISKY: Aggressive optimization that may fail
SFTConfig(
    torch_compile=True,       # Can fail on T4, A10G GPUs
    optim="adamw_bnb_8bit",   # Requires specific setup
    fp16=False,               # May cause training instability
    ...
)

# ✅ SAFE: Proven defaults
SFTConfig(
    # torch_compile=True,     # Commented with note: "Enable on H100 for 20% speedup"
    optim="adamw_torch",      # Standard, always works
    fp16=True,                # Stable and fast
    ...
)
```

**For build processes:**

```python
# ❌ UNRELIABLE: Uses make (platform-dependent)
subprocess.run(["make", "-C", "/tmp/llama.cpp", "llama-quantize"], check=True)

# ✅ RELIABLE: Uses CMake (consistent, documented)
subprocess.run([
    "cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp",
    "-DGGML_CUDA=OFF"  # Disable CUDA for faster, more reliable build
], check=True)

subprocess.run([
    "cmake", "--build", "/tmp/llama.cpp/build",
    "--target", "llama-quantize", "-j", "4"
], check=True)
```

### Real-World Example

**The `torch.compile` failure:**
- Added for "20% speedup" on H100
- **Failed fatally on T4-medium** with cryptic error
- Misdiagnosed as dataset issue (cost hours)
- **Fix:** Disable by default, add as optional comment

**Result:** Reliability > 20% performance gain

### Implementation Checklist

- [ ] Use proven, standard configurations by default
- [ ] Comment out performance optimizations with hardware notes
- [ ] Use stable build systems (CMake > make)
- [ ] Test on target hardware before production
- [ ] Document known incompatibilities
- [ ] Provide "safe" and "fast" variants when needed

**Performance loss:** 10-20% in best case
**Reliability gain:** 95%+ success rate vs 60-70%

---

## Principle 3: Create Atomic, Self-Contained Scripts

**Rule:** Scripts should work as complete, independent units. Don't remove parts to "simplify."

### What It Prevents

- **Missing dependencies** - Removed "unnecessary" packages that are actually required
- **Incomplete processes** - Skipped steps that seem redundant
- **Environment assumptions** - Scripts that need pre-setup
- **Partial failures** - Some parts work, others fail silently

### How to Apply

**Complete dependency specifications:**

```python
# ❌ INCOMPLETE: "Simplified" by removing dependencies
# /// script
# dependencies = [
#     "transformers",
#     "peft",
#     "torch",
# ]
# ///

# ✅ COMPLETE: All dependencies explicit
# /// script
# dependencies = [
#     "transformers>=4.36.0",
#     "peft>=0.7.0",
#     "torch>=2.0.0",
#     "accelerate>=0.24.0",
#     "huggingface_hub>=0.20.0",
#     "sentencepiece>=0.1.99",  # Required for tokenizers
#     "protobuf>=3.20.0",       # Required for tokenizers
#     "numpy",
#     "gguf",
# ]
# ///
```

**Complete build processes:**

```python
# ❌ INCOMPLETE: Assumes build tools exist
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"])
subprocess.run(["make", "-C", "/tmp/llama.cpp", "llama-quantize"])  # FAILS: no gcc/make

# ✅ COMPLETE: Installs all requirements
subprocess.run(["apt-get", "update", "-qq"], check=True)
subprocess.run(["apt-get", "install", "-y", "-qq", "build-essential", "cmake"], check=True)
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"])
# ... then build
```

### Real-World Example

**The `sentencepiece` failure:**
- Original script had it: worked fine
- "Simplified" version removed it: "doesn't look necessary"
- **GGUF conversion failed silently** - tokenizer couldn't convert
- Hard to debug: no obvious error message
- **Fix:** Restore all original dependencies

**Result:** Don't remove dependencies without thorough testing

### Implementation Checklist

- [ ] All dependencies in PEP 723 header with version pins
- [ ] All system packages installed by script
- [ ] No assumptions about pre-existing environment
- [ ] No "optional" steps that are actually required
- [ ] Test scripts in clean environment
- [ ] Document why each dependency is needed

**Complexity:** Slightly longer scripts
**Reliability:** Scripts "just work" every time

---

## Principle 4: Provide Clear Error Context

**Rule:** When things fail, make it obvious what went wrong and how to fix it.

### How to Apply

**Wrap subprocess calls:**

```python
# ❌ UNCLEAR: Silent failure
subprocess.run([...], check=True, capture_output=True)

# ✅ CLEAR: Shows what failed
try:
    result = subprocess.run(
        [...],
        check=True,
        capture_output=True,
        text=True
    )
    print(result.stdout)
    if result.stderr:
        print("Warnings:", result.stderr)
except subprocess.CalledProcessError as e:
    print(f"❌ Command failed!")
    print("STDOUT:", e.stdout)
    print("STDERR:", e.stderr)
    raise
```

**Validate inputs:**

```python
# ❌ UNCLEAR: Fails later with cryptic error
model = load_model(MODEL_NAME)

# ✅ CLEAR: Fails fast with clear message
if not MODEL_NAME:
    raise ValueError("MODEL_NAME environment variable not set!")

print(f"Loading model: {MODEL_NAME}")
try:
    model = load_model(MODEL_NAME)
    print(f"✅ Model loaded successfully")
except Exception as e:
    print(f"❌ Failed to load model: {MODEL_NAME}")
    print(f"Error: {e}")
    print("Hint: Check that model exists on Hub")
    raise
```

### Implementation Checklist

- [ ] Wrap external calls with try/except
- [ ] Print stdout/stderr on failure
- [ ] Validate environment variables early
- [ ] Add progress indicators (✅, ❌, 🚀)
- [ ] Include hints for common failures
- [ ] Log configuration at start

---

## Principle 5: Test the Happy Path on Known-Good Inputs

**Rule:** Before using new code in production, test with inputs you know work.

### How to Apply

**Known-good test inputs:**

```python
# For training
TEST_DATASET = "trl-lib/Capybara"   # Small, well-formatted, widely used
TEST_MODEL = "Qwen/Qwen2.5-0.5B"    # Small, fast, reliable

# For GGUF conversion
TEST_ADAPTER = "evalstate/qwen-capybara-medium"  # Known working model
TEST_BASE = "Qwen/Qwen2.5-0.5B"                  # Compatible base
```

**Testing workflow:**

1. Test with known-good inputs first
2. If that works, try production inputs
3. If production fails, you know it's the inputs (not code)
4. Isolate the difference

### Implementation Checklist

- [ ] Maintain list of known-good test models/datasets
- [ ] Test new scripts with test inputs first
- [ ] Document what makes inputs "good"
- [ ] Keep test jobs cheap (small models, short timeouts)
- [ ] Only move to production after test succeeds

**Time cost:** 5-10 minutes for test run
**Debugging time saved:** Hours

---

## Summary: The Reliability Checklist

Before submitting ANY job:

### Pre-Flight Checks
- [ ] **Verified** all repos/datasets exist (hub_repo_details)
- [ ] **Tested** with known-good inputs if new code
- [ ] **Using** proven hardware/configuration
- [ ] **Included** all dependencies in PEP 723 header
- [ ] **Installed** system requirements (build tools, etc.)
- [ ] **Set** appropriate timeout (not default 30m)
- [ ] **Configured** Hub push with HF_TOKEN
- [ ] **Added** clear error handling

### Script Quality
- [ ] Self-contained (no external setup needed)
- [ ] Complete dependencies listed
- [ ] Build tools installed by script
- [ ] Progress indicators included
- [ ] Error messages are clear
- [ ] Configuration logged at start

### Job Configuration
- [ ] Timeout > expected runtime + 30% buffer
- [ ] Hardware appropriate for model size
- [ ] Secrets include HF_TOKEN
- [ ] Environment variables set correctly
- [ ] Cost estimated and acceptable

**Following these principles transforms job success rate from ~60-70% to ~95%+**

---

## When Principles Conflict

Sometimes reliability and performance conflict. Here's how to choose:

| Scenario | Choose | Rationale |
|----------|--------|-----------|
| Demo/test | Reliability | Fast failure is worse than slow success |
| Production (first run) | Reliability | Prove it works before optimizing |
| Production (proven) | Performance | Safe to optimize after validation |
| Time-critical | Reliability | Failures cause more delay than slow runs |
| Cost-critical | Balanced | Test with small model, then optimize |

**General rule:** Reliability first, optimize second.

---

## Further Reading

- `troubleshooting.md` - Common issues and fixes
- `training_patterns.md` - Proven training configurations
- `gguf_conversion.md` - Production GGUF workflow
trl/references/trackio_guide.md
CHANGED

@@ -1,12 +1,11 @@

# Trackio Integration for TRL Training

- **Trackio** is
- ⚠️ **IMPORTANT**:
- - Without a Space, metrics are
- - The Space dashboard persists your training metrics permanently

## Setting Up Trackio for Jobs

@@ -36,7 +35,7 @@ import trackio

trackio.init(
    project="my-training",
-   space_id="username/trackio",  # CRITICAL for Jobs!
    config={
        "model": "Qwen/Qwen2.5-0.5B",
        "dataset": "trl-lib/Capybara",

@@ -79,11 +78,23 @@ Trackio automatically logs:

## Viewing the Dashboard

After starting training:
- 1. Navigate to the Space: `https://huggingface.co/spaces/username/trackio`
2. The Gradio dashboard shows all tracked experiments
3. Filter by project, compare runs, view charts with smoothing

## Recommendation

- **Trackio**: Best for real-time monitoring during long training runs
- **Weights & Biases**: Best for team collaboration, requires account

# Trackio Integration for TRL Training

+ **Trackio** is a local-first experiment tracking library that provides real-time metrics visualization via a Gradio dashboard.

+ ⚠️ **IMPORTANT**: Trackio is local-first, which means:
+ - It runs a dashboard on the machine where training happens
+ - For Jobs training, sync to a Hugging Face Space to view metrics
+ - Without a Space, metrics are only accessible during the job (then lost)

## Setting Up Trackio for Jobs

trackio.init(
    project="my-training",
+   space_id="username/my-trackio-dashboard",  # CRITICAL for Jobs!
    config={
        "model": "Qwen/Qwen2.5-0.5B",
        "dataset": "trl-lib/Capybara",

## Viewing the Dashboard

After starting training:
+ 1. Navigate to the Space: `https://huggingface.co/spaces/username/my-trackio-dashboard`
2. The Gradio dashboard shows all tracked experiments
3. Filter by project, compare runs, view charts with smoothing
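
Beyond what the trainer reports automatically, custom metrics can be logged by hand. A minimal sketch, assuming trackio's wandb-style `log`/`finish` API (the loop values are stand-ins for a real training loop):

```python
# Sketch: manual metric logging with trackio
import trackio

trackio.init(project="my-training", space_id="username/my-trackio-dashboard")
for step, loss in enumerate([1.0, 0.8, 0.5]):  # stand-in values
    trackio.log({"loss": loss, "step": step})
trackio.finish()
```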

+ ## Alternative: TensorBoard (Simpler for Jobs)

+ For simpler setup without needing a Space:

```python
SFTConfig(
    report_to="tensorboard",  # Logs saved with model to Hub
)
```

+ TensorBoard logs are automatically saved with the model and are viewable locally in TensorBoard after downloading.
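
A minimal sketch of that download-then-view flow, assuming the logs were written under the default `runs/` directory (the repo name is a placeholder):

```python
# Sketch: pull the pushed model repo, then point TensorBoard at its logs
from huggingface_hub import snapshot_download

local_dir = snapshot_download("username/my-model")
print(f"Run: tensorboard --logdir {local_dir}/runs")
```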

## Recommendation

- **Trackio**: Best for real-time monitoring during long training runs
+ - **TensorBoard**: Best for post-training analysis, simpler setup
- **Weights & Biases**: Best for team collaboration, requires account
trl/references/training_methods.md
CHANGED

@@ -94,6 +94,19 @@ hf_jobs("uv", {

**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/grpo_trainer")`

## Reward Modeling

**What it is:** Train a reward model to score responses, used as a component in RLHF pipelines.

@@ -107,6 +120,21 @@ hf_jobs("uv", {

**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/reward_trainer")`

## Method Selection Guide

| Method | Complexity | Data Required | Use Case |

@@ -114,7 +142,7 @@ hf_jobs("uv", {

| **SFT** | Low | Demonstrations | Initial fine-tuning |
| **DPO** | Medium | Paired preferences | Post-SFT alignment |
| **GRPO** | Medium | Prompts + reward fn | Online RL with automatic rewards |
| **Reward** | Medium | Paired preferences | Building RLHF pipeline |

## Recommended Pipeline

@@ -126,6 +156,7 @@ hf_jobs("uv", {

**For advanced RL scenarios:**
1. **Start with SFT** - Fine-tune base model
2. **Train reward model** - On preference data

## Dataset Format Reference

@@ -135,9 +166,8 @@ hf_doc_fetch("https://huggingface.co/docs/trl/dataset_formats")

```

Or validate your dataset:
- ```
-
- --dataset your/dataset --split train
```

## See Also

@@ -145,4 +175,4 @@ uv run https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspect

- `references/training_patterns.md` - Common training patterns and examples
- `scripts/train_sft_example.py` - Complete SFT template
- `scripts/train_dpo_example.py` - Complete DPO template
- -

**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/grpo_trainer")`

+ ## Kahneman-Tversky Optimization (KTO)

+ **What it is:** Preference tuning without paired data - uses independent positive/negative examples.

+ **When to use:**
+ - Have preference data but not paired comparisons
+ - Simpler data collection than DPO
+ - Want to incorporate human feedback without explicit pairs

+ **Dataset format:** Examples with binary labels (desirable/undesirable) but not paired (see the sketch below)

+ **Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/kto_trainer")`
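
A minimal sketch of what unpaired KTO rows look like, following the prompt/completion/label columns described in the TRL KTO docs (the example strings are placeholders):

```python
# Sketch: unpaired KTO rows — a binary label instead of chosen/rejected pairs
kto_rows = [
    {"prompt": "What is the capital of France?", "completion": "Paris.", "label": True},    # desirable
    {"prompt": "What is the capital of France?", "completion": "London.", "label": False},  # undesirable
]
```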

## Reward Modeling

**What it is:** Train a reward model to score responses, used as a component in RLHF pipelines.

**Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/reward_trainer")`

+ ## Proximal Policy Optimization (PPO)

+ **What it is:** Classic RLHF method using a reward model to guide policy optimization.

+ **When to use:**
+ - Full RLHF pipeline
+ - Have a trained reward model
+ - Need fine-grained control over optimization

+ **Requirements:** Pre-trained reward model

+ **Note:** PPO is more complex than DPO. For most use cases, start with DPO.

+ **Documentation:** `hf_doc_fetch("https://huggingface.co/docs/trl/ppo_trainer")`

## Method Selection Guide

| Method | Complexity | Data Required | Use Case |
| **SFT** | Low | Demonstrations | Initial fine-tuning |
| **DPO** | Medium | Paired preferences | Post-SFT alignment |
| **GRPO** | Medium | Prompts + reward fn | Online RL with automatic rewards |
+ | **KTO** | Medium | Unpaired preferences | Alignment with simpler data |
| **Reward** | Medium | Paired preferences | Building RLHF pipeline |
+ | **PPO** | High | Demonstrations + reward model | Full RLHF |

## Recommended Pipeline

**For advanced RL scenarios:**
1. **Start with SFT** - Fine-tune base model
2. **Train reward model** - On preference data
+ 3. **Apply GRPO or PPO** - Online RL with reward model

## Dataset Format Reference

```

Or validate your dataset:
```python
# See scripts/validate_dataset.py
```
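
A minimal sketch of invoking the validator programmatically, using the CLI form shown in `troubleshooting.md`:

```python
# Sketch: run the dataset validator as a subprocess
import subprocess

subprocess.run(
    ["python", "scripts/validate_dataset.py", "trl-lib/Capybara", "sft"],
    check=True,
)
```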

## See Also

- `references/training_patterns.md` - Common training patterns and examples
- `scripts/train_sft_example.py` - Complete SFT template
- `scripts/train_dpo_example.py` - Complete DPO template
+ - `scripts/validate_dataset.py` - Dataset format validation tool

trl/references/training_patterns.md
CHANGED

@@ -39,7 +39,7 @@ from datasets import load_dataset

from trl import DPOTrainer, DPOConfig
import trackio

- trackio.init(project="dpo-training", space_id="username/
+ trackio.init(project="dpo-training", space_id="username/my-dashboard")

dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")

trl/references/troubleshooting.md
CHANGED

@@ -103,15 +103,8 @@ trainer = SFTTrainer(

2. **Validate dataset before training:**
   ```bash
   ```
-  Or via hf_jobs:
   ```python
   hf_jobs("uv", {
       "script": "https://huggingface.co/datasets/mcp-tools/skills/raw/main/dataset_inspector.py",
       "script_args": ["--dataset", "dataset-name", "--split", "train"]
   })
   ```

3. **Verify field names:**

@@ -257,7 +250,7 @@ If issues persist:

3. **Review related guides:**
   - `references/hub_saving.md` - Hub authentication issues
   - `references/hardware_guide.md` - Hardware selection and specs
   - `references/training_patterns.md` - Eval dataset requirements
-  - SKILL.md "Working with Scripts" section - Script format and URL issues

4. **Ask in HF forums:** https://discuss.huggingface.co/

2. **Validate dataset before training:**
   ```bash
   python scripts/validate_dataset.py <dataset-name> <method>
   # e.g., python scripts/validate_dataset.py trl-lib/Capybara sft
   ```

3. **Verify field names:**

3. **Review related guides:**
   - `references/hub_saving.md` - Hub authentication issues
   - `references/hardware_guide.md` - Hardware selection and specs
+  - `references/uv_scripts_guide.md` - UV script format issues
   - `references/training_patterns.md` - Eval dataset requirements

4. **Ask in HF forums:** https://discuss.huggingface.co/
trl/references/uv_scripts_guide.md
ADDED

@@ -0,0 +1,414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UV Scripts Guide for TRL Training
|
| 2 |
+
|
| 3 |
+
UV scripts are self-contained Python scripts with inline dependency declarations (PEP 723). They're the modern, recommended approach for custom TRL training.
|
| 4 |
+
|
| 5 |
+
## What are UV Scripts?
|
| 6 |
+
|
| 7 |
+
UV scripts declare dependencies at the top of the file using special comment syntax:
|
| 8 |
+
|
| 9 |
+
```python
|
| 10 |
+
# /// script
|
| 11 |
+
# dependencies = [
|
| 12 |
+
# "trl>=0.12.0",
|
| 13 |
+
# "transformers>=4.36.0",
|
| 14 |
+
# ]
|
| 15 |
+
# ///
|
| 16 |
+
|
| 17 |
+
# Your training code here
|
| 18 |
+
from trl import SFTTrainer
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
## Benefits
|
| 22 |
+
|
| 23 |
+
1. **Self-contained**: Dependencies are part of the script
|
| 24 |
+
2. **Version control**: Pin exact versions for reproducibility
|
| 25 |
+
3. **No setup files**: No requirements.txt or setup.py needed
|
| 26 |
+
4. **Portable**: Run anywhere UV is installed
|
| 27 |
+
5. **Clean**: Much cleaner than bash + pip + python strings
|
| 28 |
+
|
| 29 |
+
## Creating a UV Script
|
| 30 |
+
|
| 31 |
+
### Step 1: Define Dependencies
|
| 32 |
+
|
| 33 |
+
Start with dependency declaration:
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
# /// script
|
| 37 |
+
# dependencies = [
|
| 38 |
+
# "trl>=0.12.0", # TRL for training
|
| 39 |
+
# "transformers>=4.36.0", # Transformers library
|
| 40 |
+
# "datasets>=2.14.0", # Dataset loading
|
| 41 |
+
# "accelerate>=0.24.0", # Distributed training
|
| 42 |
+
# "peft>=0.7.0", # LoRA/PEFT (optional)
|
| 43 |
+
# ]
|
| 44 |
+
# ///
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
### Step 2: Add Training Code
|
| 48 |
+
|
| 49 |
+
```python
|
| 50 |
+
# /// script
|
| 51 |
+
# dependencies = ["trl", "peft"]
|
| 52 |
+
# ///
|
| 53 |
+
|
| 54 |
+
from datasets import load_dataset
|
| 55 |
+
from peft import LoraConfig
|
| 56 |
+
from trl import SFTTrainer, SFTConfig
|
| 57 |
+
|
| 58 |
+
# Load dataset
|
| 59 |
+
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 60 |
+
|
| 61 |
+
# Configure training
|
| 62 |
+
config = SFTConfig(
|
| 63 |
+
output_dir="my-model",
|
| 64 |
+
num_train_epochs=3,
|
| 65 |
+
push_to_hub=True,
|
| 66 |
+
hub_model_id="username/my-model",
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
# Train
|
| 70 |
+
trainer = SFTTrainer(
|
| 71 |
+
model="Qwen/Qwen2.5-0.5B",
|
| 72 |
+
train_dataset=dataset,
|
| 73 |
+
args=config,
|
| 74 |
+
peft_config=LoraConfig(r=16, lora_alpha=32),
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
trainer.train()
|
| 78 |
+
trainer.push_to_hub()
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
### Step 3: Run on Jobs
|
| 82 |
+
|
| 83 |
+
```python
|
| 84 |
+
hf_jobs("uv", {
|
| 85 |
+
"script": "train.py", # or URL
|
| 86 |
+
"flavor": "a10g-large",
|
| 87 |
+
"timeout": "2h",
|
| 88 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 89 |
+
})
|
| 90 |
+
```
|
| 91 |
+
|
| 92 |
+
## Running Scripts from URLs
|
| 93 |
+
|
| 94 |
+
UV scripts can be run directly from URLs:
|
| 95 |
+
|
| 96 |
+
```python
|
| 97 |
+
hf_jobs("uv", {
|
| 98 |
+
"script": "https://gist.github.com/username/abc123/raw/train.py",
|
| 99 |
+
"flavor": "a10g-large",
|
| 100 |
+
"timeout": "2h",
|
| 101 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 102 |
+
})
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
**Benefits:**
|
| 106 |
+
- Share scripts via GitHub Gists
|
| 107 |
+
- Version control in Git repos
|
| 108 |
+
- Scripts accessible from anywhere
|
| 109 |
+
|
| 110 |
+
## Working with Local Scripts
|
| 111 |
+
|
| 112 |
+
β οΈ **Important:** The `hf_jobs("uv", ...)` command does NOT support local file paths directly. You must make scripts accessible via URL.
|
| 113 |
+
|
| 114 |
+
### Why Local Paths Don't Work
|
| 115 |
+
|
| 116 |
+
The Jobs API runs in isolated Docker containers without access to your local filesystem. Scripts must be:
|
| 117 |
+
- Publicly accessible URLs, OR
|
| 118 |
+
- Accessible via authentication (HF_TOKEN for private repos)
|
| 119 |
+
|
| 120 |
+
**Don't:**
|
| 121 |
+
```python
|
| 122 |
+
# β These will all fail
|
| 123 |
+
hf_jobs("uv", {"script": "train.py"})
|
| 124 |
+
hf_jobs("uv", {"script": "./scripts/train.py"})
|
| 125 |
+
hf_jobs("uv", {"script": "/path/to/train.py"})
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
**Do:**
|
| 129 |
+
```python
|
| 130 |
+
# β
These work
|
| 131 |
+
hf_jobs("uv", {"script": "https://huggingface.co/user/repo/resolve/main/train.py"})
|
| 132 |
+
hf_jobs("uv", {"script": "https://raw.githubusercontent.com/user/repo/main/train.py"})
|
| 133 |
+
hf_jobs("uv", {"script": "https://gist.githubusercontent.com/user/id/raw/train.py"})
|
| 134 |
+
```
|
| 135 |
+
|
| 136 |
+
### Recommended: Upload to Hugging Face Hub
|
| 137 |
+
|
| 138 |
+
The easiest way to use local scripts is to upload them to a Hugging Face repository:
|
| 139 |
+
|
| 140 |
+
```bash
|
| 141 |
+
# Create a dedicated scripts repo
|
| 142 |
+
huggingface-cli repo create my-training-scripts --type model
|
| 143 |
+
|
| 144 |
+
# Upload your script
|
| 145 |
+
huggingface-cli upload my-training-scripts ./train.py train.py
|
| 146 |
+
|
| 147 |
+
# If you update the script later
|
| 148 |
+
huggingface-cli upload my-training-scripts ./train.py train.py --commit-message "Updated training params"
|
| 149 |
+
|
| 150 |
+
# Use in jobs
|
| 151 |
+
script_url = "https://huggingface.co/USERNAME/my-training-scripts/resolve/main/train.py"
|
| 152 |
+
|
| 153 |
+
hf_jobs("uv", {
|
| 154 |
+
"script": script_url,
|
| 155 |
+
"flavor": "a10g-large",
|
| 156 |
+
"timeout": "2h",
|
| 157 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 158 |
+
})
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
**Benefits:**
|
| 162 |
+
- β
Version control via Git
|
| 163 |
+
- β
Private repos supported (with HF_TOKEN)
|
| 164 |
+
- β
Easy to share and update
|
| 165 |
+
- β
No external dependencies
|
| 166 |
+
- β
Integrates with HF ecosystem
|
| 167 |
+
|
| 168 |
+
**For Private Scripts:**
|
| 169 |
+
```python
|
| 170 |
+
# Your script is in a private repo
|
| 171 |
+
hf_jobs("uv", {
|
| 172 |
+
"script": "https://huggingface.co/USERNAME/private-scripts/resolve/main/train.py",
|
| 173 |
+
"flavor": "a10g-large",
|
| 174 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"} # Allows access to private repo
|
| 175 |
+
})
|
| 176 |
+
```
|
| 177 |
+
|
| 178 |
+
### Alternative: GitHub Gist
|
| 179 |
+
|
| 180 |
+
For quick scripts or one-off experiments:
|
| 181 |
+
|
| 182 |
+
```bash
|
| 183 |
+
# 1. Create a gist at https://gist.github.com
|
| 184 |
+
# 2. Paste your script
|
| 185 |
+
# 3. Click "Create public gist" (or secret gist)
|
| 186 |
+
# 4. Click the "Raw" button to get the raw URL
|
| 187 |
+
|
| 188 |
+
# Use in jobs
|
| 189 |
+
hf_jobs("uv", {
|
| 190 |
+
"script": "https://gist.githubusercontent.com/username/gist-id/raw/train.py",
|
| 191 |
+
"flavor": "a10g-large"
|
| 192 |
+
})
|
| 193 |
+
```
|
| 194 |
+
|
| 195 |
+
**Benefits:**
|
| 196 |
+
- β
Quick and easy
|
| 197 |
+
- β
No HF CLI setup needed
|
| 198 |
+
- β
Good for sharing examples
|
| 199 |
+
|
| 200 |
+
**Limitations:**
|
| 201 |
+
- β Less version control than Git repos
|
| 202 |
+
- β Secret gists are still publicly accessible via URL
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
## Using TRL Example Scripts
|
| 206 |
+
|
| 207 |
+
TRL provides maintained scripts that are UV-compatible:
|
| 208 |
+
|
| 209 |
+
```python
|
| 210 |
+
hf_jobs("uv", {
|
| 211 |
+
"script": "https://raw.githubusercontent.com/huggingface/trl/main/examples/scripts/sft.py",
|
| 212 |
+
"script_args": [
|
| 213 |
+
"--model_name_or_path", "Qwen/Qwen2.5-0.5B",
|
| 214 |
+
"--dataset_name", "trl-lib/Capybara",
|
| 215 |
+
"--output_dir", "my-model",
|
| 216 |
+
"--push_to_hub",
|
| 217 |
+
"--hub_model_id", "username/my-model"
|
| 218 |
+
],
|
| 219 |
+
"flavor": "a10g-large",
|
| 220 |
+
"timeout": "2h",
|
| 221 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 222 |
+
})
|
| 223 |
+
```
|
| 224 |
+
|
| 225 |
+
**Available TRL scripts:**
|
| 226 |
+
- `sft.py` - Supervised fine-tuning
|
| 227 |
+
- `dpo.py` - Direct Preference Optimization
|
| 228 |
+
- `kto.py` - KTO training
|
| 229 |
+
- `grpo.py` - GRPO training
|
| 230 |
+
- `reward.py` - Reward model training
|
| 231 |
+
- `prm.py` - Process reward model
|
| 232 |
+
|
| 233 |
+
All at: https://github.com/huggingface/trl/tree/main/examples/scripts
|
| 234 |
+
|
| 235 |
+
## Best Practices
|
| 236 |
+
|
| 237 |
+
### 1. Pin Versions
|
| 238 |
+
|
| 239 |
+
Always pin dependency versions for reproducibility:
|
| 240 |
+
|
| 241 |
+
```python
|
| 242 |
+
# /// script
|
| 243 |
+
# dependencies = [
|
| 244 |
+
# "trl==0.12.0", # Exact version
|
| 245 |
+
# "transformers>=4.36.0", # Minimum version
|
| 246 |
+
# ]
|
| 247 |
+
# ///
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
### 2. Add Logging
|
| 251 |
+
|
| 252 |
+
Include progress logging for monitoring:
|
| 253 |
+
|
| 254 |
+
```python
|
| 255 |
+
print("β
Dataset loaded")
|
| 256 |
+
print("π Starting training...")
|
| 257 |
+
print(f"π Training on {len(dataset)} examples")
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
### 3. Validate Inputs
|
| 261 |
+
|
| 262 |
+
Check dataset and configuration before training:
|
| 263 |
+
|
| 264 |
+
```python
|
| 265 |
+
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 266 |
+
assert len(dataset) > 0, "Dataset is empty!"
|
| 267 |
+
print(f"β
Dataset loaded: {len(dataset)} examples")
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
### 4. Add Comments
|
| 271 |
+
|
| 272 |
+
Document the script for future reference:
|
| 273 |
+
|
| 274 |
+
```python
|
| 275 |
+
# Train Qwen-0.5B on Capybara dataset using LoRA
|
| 276 |
+
# Expected runtime: ~2 hours on a10g-large
|
| 277 |
+
# Cost estimate: ~$6-8
|
| 278 |
+
```
|
| 279 |
+
|
| 280 |
+
### 5. Test Locally First
|
| 281 |
+
|
| 282 |
+
Test scripts locally before running on Jobs:
|
| 283 |
+
|
| 284 |
+
```bash
|
| 285 |
+
uv run train.py # Runs locally with uv
|
| 286 |
+
```
|
| 287 |
+
|
| 288 |
+
## Docker Images
|
| 289 |
+
|
| 290 |
+
### Default Image
|
| 291 |
+
|
| 292 |
+
UV scripts run on default Python image with UV installed.
|
| 293 |
+
|
| 294 |
+
### TRL Image
|
| 295 |
+
|
| 296 |
+
Use official TRL image for faster startup:
|
| 297 |
+
|
| 298 |
+
```python
|
| 299 |
+
hf_jobs("uv", {
|
| 300 |
+
"script": "train.py",
|
| 301 |
+
"image": "huggingface/trl", # Pre-installed TRL dependencies
|
| 302 |
+
"flavor": "a10g-large",
|
| 303 |
+
"timeout": "2h",
|
| 304 |
+
"secrets": {"HF_TOKEN": "$HF_TOKEN"}
|
| 305 |
+
})
|
| 306 |
+
```
|
| 307 |
+
|
| 308 |
+
**Benefits:**
|
| 309 |
+
- Faster job startup (no pip install)
|
| 310 |
+
- All TRL dependencies pre-installed
|
| 311 |
+
- Tested and maintained by HF
|
| 312 |
+
|
| 313 |
+
## Template Scripts
|
| 314 |
+
|
| 315 |
+
### Basic SFT Template
|
| 316 |
+
|
| 317 |
+
```python
|
| 318 |
+
# /// script
|
| 319 |
+
# dependencies = ["trl>=0.12.0"]
|
| 320 |
+
# ///
|
| 321 |
+
|
| 322 |
+
from datasets import load_dataset
|
| 323 |
+
from trl import SFTTrainer, SFTConfig
|
| 324 |
+
|
| 325 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 326 |
+
|
| 327 |
+
trainer = SFTTrainer(
|
| 328 |
+
model="MODEL_NAME",
|
| 329 |
+
train_dataset=dataset,
|
| 330 |
+
args=SFTConfig(
|
| 331 |
+
output_dir="OUTPUT_DIR",
|
| 332 |
+
num_train_epochs=3,
|
| 333 |
+
push_to_hub=True,
|
| 334 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 335 |
+
)
|
| 336 |
+
)
|
| 337 |
+
|
| 338 |
+
trainer.train()
|
| 339 |
+
trainer.push_to_hub()
|
| 340 |
+
```
|
| 341 |
+
|
| 342 |
+
### SFT with LoRA Template
|
| 343 |
+
|
| 344 |
+
```python
|
| 345 |
+
# /// script
|
| 346 |
+
# dependencies = ["trl>=0.12.0", "peft>=0.7.0"]
|
| 347 |
+
# ///
|
| 348 |
+
|
| 349 |
+
from datasets import load_dataset
|
| 350 |
+
from peft import LoraConfig
|
| 351 |
+
from trl import SFTTrainer, SFTConfig
|
| 352 |
+
|
| 353 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 354 |
+
|
| 355 |
+
trainer = SFTTrainer(
|
| 356 |
+
model="MODEL_NAME",
|
| 357 |
+
train_dataset=dataset,
|
| 358 |
+
peft_config=LoraConfig(r=16, lora_alpha=32),
|
| 359 |
+
args=SFTConfig(
|
| 360 |
+
output_dir="OUTPUT_DIR",
|
| 361 |
+
num_train_epochs=3,
|
| 362 |
+
push_to_hub=True,
|
| 363 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 364 |
+
)
|
| 365 |
+
)
|
| 366 |
+
|
| 367 |
+
trainer.train()
|
| 368 |
+
trainer.push_to_hub()
|
| 369 |
+
```
|
| 370 |
+
|
| 371 |
+
### DPO Template
|
| 372 |
+
|
| 373 |
+
```python
|
| 374 |
+
# /// script
|
| 375 |
+
# dependencies = ["trl>=0.12.0"]
|
| 376 |
+
# ///
|
| 377 |
+
|
| 378 |
+
from datasets import load_dataset
|
| 379 |
+
from transformers import AutoTokenizer
|
| 380 |
+
from trl import DPOTrainer, DPOConfig
|
| 381 |
+
|
| 382 |
+
model_name = "MODEL_NAME"
|
| 383 |
+
dataset = load_dataset("DATASET_NAME", split="train")
|
| 384 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 385 |
+
|
| 386 |
+
trainer = DPOTrainer(
|
| 387 |
+
model=model_name,
|
| 388 |
+
train_dataset=dataset,
|
| 389 |
+
tokenizer=tokenizer,
|
| 390 |
+
args=DPOConfig(
|
| 391 |
+
output_dir="OUTPUT_DIR",
|
| 392 |
+
num_train_epochs=3,
|
| 393 |
+
push_to_hub=True,
|
| 394 |
+
hub_model_id="USERNAME/MODEL_NAME",
|
| 395 |
+
)
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
trainer.train()
|
| 399 |
+
trainer.push_to_hub()
|
| 400 |
+
```
|
| 401 |
+
|
| 402 |
+
## Troubleshooting
|
| 403 |
+
|
| 404 |
+
### Issue: Dependencies not installing
|
| 405 |
+
**Check:** Verify dependency names and versions are correct
|
| 406 |
+
|
| 407 |
+
### Issue: Script not found
|
| 408 |
+
**Check:** Verify URL is accessible and points to raw file
|
| 409 |
+
|
| 410 |
+
### Issue: Import errors
|
| 411 |
+
**Solution:** Add missing dependencies to `dependencies` list
|
| 412 |
+
|
| 413 |
+
### Issue: Slow startup
|
| 414 |
+
**Solution:** Use `image="huggingface/trl"` for pre-installed dependencies
|
trl/scripts/convert_to_gguf.py
CHANGED
|
@@ -13,46 +13,26 @@
|
|
| 13 |
# ]
|
| 14 |
# ///
|
| 15 |
|
| 16 |
-
"""
|
| 17 |
-
GGUF Conversion Script - Production Ready
|
| 18 |
-
|
| 19 |
-
This script converts a LoRA fine-tuned model to GGUF format for use with:
|
| 20 |
-
- llama.cpp
|
| 21 |
-
- Ollama
|
| 22 |
-
- LM Studio
|
| 23 |
-
- Other GGUF-compatible tools
|
| 24 |
-
|
| 25 |
-
Usage:
|
| 26 |
-
Set environment variables:
|
| 27 |
-
- ADAPTER_MODEL: Your fine-tuned model (e.g., "username/my-finetuned-model")
|
| 28 |
-
- BASE_MODEL: Base model used for fine-tuning (e.g., "Qwen/Qwen2.5-0.5B")
|
| 29 |
-
- OUTPUT_REPO: Where to upload GGUF files (e.g., "username/my-model-gguf")
|
| 30 |
-
- HF_USERNAME: Your Hugging Face username (optional, for README)
|
| 31 |
-
|
| 32 |
-
Dependencies: All required packages are declared in PEP 723 header above.
|
| 33 |
-
Build tools (gcc, cmake) are installed automatically by this script.
|
| 34 |
-
"""
|
| 35 |
-
|
| 36 |
import os
|
| 37 |
import torch
|
| 38 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 39 |
from peft import PeftModel
|
| 40 |
-
from huggingface_hub import HfApi
|
| 41 |
import subprocess
|
| 42 |
|
| 43 |
print("π GGUF Conversion Script")
|
| 44 |
print("=" * 60)
|
| 45 |
|
| 46 |
-
# Configuration
|
| 47 |
-
ADAPTER_MODEL =
|
| 48 |
-
BASE_MODEL =
|
| 49 |
-
|
| 50 |
-
username = os.environ.get("HF_USERNAME",
|
| 51 |
|
| 52 |
print(f"\nπ¦ Configuration:")
|
| 53 |
print(f" Base model: {BASE_MODEL}")
|
| 54 |
print(f" Adapter model: {ADAPTER_MODEL}")
|
| 55 |
-
print(f" Output repo: {
|
| 56 |
|
| 57 |
# Step 1: Load base model and adapter
|
| 58 |
print("\nπ§ Step 1: Loading base model and LoRA adapter...")
|
|
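The loading and merging code for Steps 1-2 falls between hunks here. In both versions it follows the standard PEFT merge pattern, roughly as below (a sketch using the script's own imports and config names; `merged_dir` is an assumed temp path, the script's actual value is not shown):

```python
# Sketch of Steps 1-2: load base + adapter, fold LoRA weights in, save to disk
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, ADAPTER_MODEL)
model = model.merge_and_unload()      # merge LoRA deltas into the base weights
merged_dir = "/tmp/merged_model"      # assumed path
model.save_pretrained(merged_dir)
AutoTokenizer.from_pretrained(BASE_MODEL).save_pretrained(merged_dir)
```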
@@ -88,21 +68,6 @@ print(f" ✅ Merged model saved to {merged_dir}")
|
|
| 88 |
|
| 89 |
# Step 3: Install llama.cpp for conversion
|
| 90 |
print("\nπ₯ Step 3: Setting up llama.cpp for GGUF conversion...")
|
| 91 |
-
|
| 92 |
-
# CRITICAL: Install build tools FIRST (before cloning llama.cpp)
|
| 93 |
-
print(" Installing build tools...")
|
| 94 |
-
subprocess.run(
|
| 95 |
-
["apt-get", "update", "-qq"],
|
| 96 |
-
check=True,
|
| 97 |
-
capture_output=True
|
| 98 |
-
)
|
| 99 |
-
subprocess.run(
|
| 100 |
-
["apt-get", "install", "-y", "-qq", "build-essential", "cmake"],
|
| 101 |
-
check=True,
|
| 102 |
-
capture_output=True
|
| 103 |
-
)
|
| 104 |
-
print(" β
Build tools installed")
|
| 105 |
-
|
| 106 |
print(" Cloning llama.cpp repository...")
|
| 107 |
subprocess.run(
|
| 108 |
["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"],
|
|
@@ -117,7 +82,7 @@ subprocess.run(
|
|
| 117 |
check=True,
|
| 118 |
capture_output=True
|
| 119 |
)
|
| 120 |
-
#
|
| 121 |
subprocess.run(
|
| 122 |
["pip", "install", "sentencepiece", "protobuf"],
|
| 123 |
check=True,
|
|
@@ -131,8 +96,7 @@ gguf_output_dir = "/tmp/gguf_output"
|
|
| 131 |
os.makedirs(gguf_output_dir, exist_ok=True)
|
| 132 |
|
| 133 |
convert_script = "/tmp/llama.cpp/convert_hf_to_gguf.py"
|
| 134 |
-
|
| 135 |
-
gguf_file = f"{gguf_output_dir}/{model_name}-f16.gguf"
|
| 136 |
|
| 137 |
print(f" Running: python {convert_script} {merged_dir}")
|
| 138 |
try:
|
|
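The conversion call inside this `try:` block is cut off by the hunk. A plausible shape, using `convert_hf_to_gguf.py`'s real `--outfile`/`--outtype` flags (the exact invocation in the script is not shown):

```python
# Hedged sketch of Step 4: HF checkpoint -> FP16 GGUF
subprocess.run(
    ["python", convert_script, merged_dir, "--outfile", gguf_file, "--outtype", "f16"],
    check=True, capture_output=True, text=True,
)
```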
@@ -159,38 +123,16 @@ print(f" ✅ FP16 GGUF created: {gguf_file}")
|
|
| 159 |
|
| 160 |
# Step 5: Quantize to different formats
|
| 161 |
print("\nβοΈ Step 5: Creating quantized versions...")
|
|
|
|
| 162 |
|
| 163 |
-
# Build quantize tool
|
| 164 |
-
print(" Building quantize tool
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
["cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp",
|
| 172 |
-
"-DGGML_CUDA=OFF"], # Disable CUDA for faster build
|
| 173 |
-
check=True,
|
| 174 |
-
capture_output=True,
|
| 175 |
-
text=True
|
| 176 |
-
)
|
| 177 |
-
|
| 178 |
-
# Build just the quantize tool
|
| 179 |
-
subprocess.run(
|
| 180 |
-
["cmake", "--build", "/tmp/llama.cpp/build", "--target", "llama-quantize", "-j", "4"],
|
| 181 |
-
check=True,
|
| 182 |
-
capture_output=True,
|
| 183 |
-
text=True
|
| 184 |
-
)
|
| 185 |
-
print(" β
Quantize tool built")
|
| 186 |
-
except subprocess.CalledProcessError as e:
|
| 187 |
-
print(f" β Build failed!")
|
| 188 |
-
print("STDOUT:", e.stdout)
|
| 189 |
-
print("STDERR:", e.stderr)
|
| 190 |
-
raise
|
| 191 |
-
|
| 192 |
-
# Use the CMake build output path
|
| 193 |
-
quantize_bin = "/tmp/llama.cpp/build/bin/llama-quantize"
|
| 194 |
|
| 195 |
# Common quantization formats
|
| 196 |
quant_formats = [
|
|
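The entries of `quant_formats` fall between hunks. Judging from the quantization table in the generated README further down, the list plausibly looks like this (a hypothetical reconstruction, not the script's literal contents):

```python
quant_formats = [
    ("Q8_0",   "8-bit, high quality"),
    ("Q5_K_M", "5-bit medium, good quality, smaller"),
    ("Q4_K_M", "4-bit medium, recommended balance"),
]
```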
@@ -202,7 +144,7 @@ quant_formats = [
|
|
| 202 |
quantized_files = []
|
| 203 |
for quant_type, description in quant_formats:
|
| 204 |
print(f" Creating {quant_type} quantization ({description})...")
|
| 205 |
-
quant_file = f"{gguf_output_dir}/
|
| 206 |
|
| 207 |
subprocess.run(
|
| 208 |
[quantize_bin, gguf_file, quant_file, quant_type],
|
|
@@ -220,9 +162,9 @@ print("\nβοΈ Step 6: Uploading to Hugging Face Hub...")
|
|
| 220 |
api = HfApi()
|
| 221 |
|
| 222 |
# Create repo
|
| 223 |
-
print(f" Creating repository: {
|
| 224 |
try:
|
| 225 |
-
api.create_repo(repo_id=
|
| 226 |
print(" β
Repository created")
|
| 227 |
except Exception as e:
|
| 228 |
print(f" βΉοΈ Repository may already exist: {e}")
|
|
@@ -231,8 +173,8 @@ except Exception as e:
|
|
| 231 |
print(" Uploading FP16 GGUF...")
|
| 232 |
api.upload_file(
|
| 233 |
path_or_fileobj=gguf_file,
|
| 234 |
-
path_in_repo=
|
| 235 |
-
repo_id=
|
| 236 |
)
|
| 237 |
print(" β
FP16 uploaded")
|
| 238 |
|
|
@@ -241,8 +183,8 @@ for quant_file, quant_type in quantized_files:
|
|
| 241 |
print(f" Uploading {quant_type}...")
|
| 242 |
api.upload_file(
|
| 243 |
path_or_fileobj=quant_file,
|
| 244 |
-
path_in_repo=f"
|
| 245 |
-
repo_id=
|
| 246 |
)
|
| 247 |
print(f" β
{quant_type} uploaded")
|
| 248 |
|
|
@@ -258,7 +200,7 @@ tags:
|
|
| 258 |
- sft
|
| 259 |
---
|
| 260 |
|
| 261 |
-
# {
|
| 262 |
|
| 263 |
This is a GGUF conversion of [{ADAPTER_MODEL}](https://huggingface.co/{ADAPTER_MODEL}), which is a LoRA fine-tuned version of [{BASE_MODEL}](https://huggingface.co/{BASE_MODEL}).
|
| 264 |
|
|
@@ -273,10 +215,10 @@ This is a GGUF conversion of [{ADAPTER_MODEL}](https://huggingface.co/{ADAPTER_M
|
|
| 273 |
|
| 274 |
| File | Quant | Size | Description | Use Case |
|
| 275 |
|------|-------|------|-------------|----------|
|
| 276 |
-
|
|
| 277 |
-
|
|
| 278 |
-
|
|
| 279 |
-
|
|
| 280 |
|
| 281 |
## Usage
|
| 282 |
|
|
@@ -284,23 +226,23 @@ This is a GGUF conversion of [{ADAPTER_MODEL}](https://huggingface.co/{ADAPTER_M
|
|
| 284 |
|
| 285 |
```bash
|
| 286 |
# Download model
|
| 287 |
-
huggingface-cli download {
|
| 288 |
|
| 289 |
# Run with llama.cpp
|
| 290 |
-
./llama-cli -m
|
| 291 |
```
|
| 292 |
|
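The CLI download in this template can also be done from Python with `huggingface_hub` (repo and file names below are placeholders standing in for the truncated values above):

```python
from huggingface_hub import hf_hub_download

# Fetch one GGUF file from the Hub and return its local cache path
path = hf_hub_download(
    repo_id="USERNAME/MODEL_NAME-gguf",
    filename="model-q4_k_m.gguf",
)
print(path)
```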
| 293 |
### With Ollama
|
| 294 |
|
| 295 |
1. Create a `Modelfile`:
|
| 296 |
```
|
| 297 |
-
FROM ./
|
| 298 |
```
|
| 299 |
|
| 300 |
2. Create the model:
|
| 301 |
```bash
|
| 302 |
-
ollama create
|
| 303 |
-
ollama run
|
| 304 |
```
|
| 305 |
|
| 306 |
### With LM Studio
|
|
@@ -309,6 +251,15 @@ ollama run my-model
|
|
| 309 |
2. Import into LM Studio
|
| 310 |
3. Start chatting!
|
| 311 |
|
| 312 |
## License
|
| 313 |
|
| 314 |
Inherits the license from the base model: {BASE_MODEL}
|
|
@@ -316,12 +267,12 @@ Inherits the license from the base model: {BASE_MODEL}
|
|
| 316 |
## Citation
|
| 317 |
|
| 318 |
```bibtex
|
| 319 |
-
@misc{{
|
| 320 |
author = {{{username}}},
|
| 321 |
-
title = {{
|
| 322 |
year = {{2025}},
|
| 323 |
publisher = {{Hugging Face}},
|
| 324 |
-
url = {{https://huggingface.co/{
|
| 325 |
}}
|
| 326 |
```
|
| 327 |
|
|
@@ -333,18 +284,18 @@ Inherits the license from the base model: {BASE_MODEL}
|
|
| 333 |
api.upload_file(
|
| 334 |
path_or_fileobj=readme_content.encode(),
|
| 335 |
path_in_repo="README.md",
|
| 336 |
-
repo_id=
|
| 337 |
)
|
| 338 |
print(" β
README uploaded")
|
| 339 |
|
| 340 |
print("\n" + "=" * 60)
|
| 341 |
print("β
GGUF Conversion Complete!")
|
| 342 |
-
print(f"π¦ Repository: https://huggingface.co/{
|
| 343 |
-
print(
|
| 344 |
-
print(f" huggingface-cli download {
|
| 345 |
-
print(
|
| 346 |
print(" 1. Download the GGUF file")
|
| 347 |
-
print(
|
| 348 |
-
print(" 3. ollama create
|
| 349 |
-
print(" 4. ollama run
|
| 350 |
print("=" * 60)
|
| 13 |
# ]
|
| 14 |
# ///
|
| 15 |
|
| 16 |
import os
|
| 17 |
import torch
|
| 18 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 19 |
from peft import PeftModel
|
| 20 |
+
from huggingface_hub import HfApi, snapshot_download
|
| 21 |
import subprocess
|
| 22 |
|
| 23 |
print("π GGUF Conversion Script")
|
| 24 |
print("=" * 60)
|
| 25 |
|
| 26 |
+
# Configuration
|
| 27 |
+
ADAPTER_MODEL = "evalstate/qwen-capybara-medium"
|
| 28 |
+
BASE_MODEL = "Qwen/Qwen2.5-0.5B"
|
| 29 |
+
OUTPUT_MODEL_NAME = "evalstate/qwen-capybara-medium-gguf"
|
| 30 |
+
username = os.environ.get("HF_USERNAME", "evalstate")
|
| 31 |
|
| 32 |
print(f"\nπ¦ Configuration:")
|
| 33 |
print(f" Base model: {BASE_MODEL}")
|
| 34 |
print(f" Adapter model: {ADAPTER_MODEL}")
|
| 35 |
+
print(f" Output repo: {OUTPUT_MODEL_NAME}")
|
| 36 |
|
| 37 |
# Step 1: Load base model and adapter
|
| 38 |
print("\nπ§ Step 1: Loading base model and LoRA adapter...")
|
| 68 |
|
| 69 |
# Step 3: Install llama.cpp for conversion
|
| 70 |
print("\nπ₯ Step 3: Setting up llama.cpp for GGUF conversion...")
|
| 71 |
print(" Cloning llama.cpp repository...")
|
| 72 |
subprocess.run(
|
| 73 |
["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"],
|
|
|
|
| 82 |
check=True,
|
| 83 |
capture_output=True
|
| 84 |
)
|
| 85 |
+
# Also need sentencepiece for tokenizer conversion
|
| 86 |
subprocess.run(
|
| 87 |
["pip", "install", "sentencepiece", "protobuf"],
|
| 88 |
check=True,
|
|
|
|
| 96 |
os.makedirs(gguf_output_dir, exist_ok=True)
|
| 97 |
|
| 98 |
convert_script = "/tmp/llama.cpp/convert_hf_to_gguf.py"
|
| 99 |
+
gguf_file = f"{gguf_output_dir}/qwen-capybara-medium-f16.gguf"
|
| 100 |
|
| 101 |
print(f" Running: python {convert_script} {merged_dir}")
|
| 102 |
try:
|
|
|
|
| 123 |
|
| 124 |
# Step 5: Quantize to different formats
|
| 125 |
print("\nβοΈ Step 5: Creating quantized versions...")
|
| 126 |
+
quantize_bin = "/tmp/llama.cpp/llama-quantize"
|
| 127 |
|
| 128 |
+
# Build quantize tool first
|
| 129 |
+
print(" Building quantize tool...")
|
| 130 |
+
subprocess.run(
|
| 131 |
+
["make", "-C", "/tmp/llama.cpp", "llama-quantize"],
|
| 132 |
+
check=True,
|
| 133 |
+
capture_output=True
|
| 134 |
+
)
|
| 135 |
+
print(" β
Quantize tool built")
|
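Note that recent llama.cpp checkouts have dropped the Makefile build, so `make -C` may fail on a fresh clone. In that case the CMake route removed by this diff still applies, e.g.:

```python
# CMake fallback, mirroring the commands removed above; the binary then lands
# in build/bin/ rather than the repository root
subprocess.run(
    ["cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp", "-DGGML_CUDA=OFF"],
    check=True, capture_output=True, text=True,
)
subprocess.run(
    ["cmake", "--build", "/tmp/llama.cpp/build", "--target", "llama-quantize", "-j", "4"],
    check=True, capture_output=True, text=True,
)
quantize_bin = "/tmp/llama.cpp/build/bin/llama-quantize"
```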
| 136 |
|
| 137 |
# Common quantization formats
|
| 138 |
quant_formats = [
|
|
|
|
| 144 |
quantized_files = []
|
| 145 |
for quant_type, description in quant_formats:
|
| 146 |
print(f" Creating {quant_type} quantization ({description})...")
|
| 147 |
+
quant_file = f"{gguf_output_dir}/qwen-capybara-medium-{quant_type.lower()}.gguf"
|
| 148 |
|
| 149 |
subprocess.run(
|
| 150 |
[quantize_bin, gguf_file, quant_file, quant_type],
|
|
|
|
| 162 |
api = HfApi()
|
| 163 |
|
| 164 |
# Create repo
|
| 165 |
+
print(f" Creating repository: {OUTPUT_MODEL_NAME}")
|
| 166 |
try:
|
| 167 |
+
api.create_repo(repo_id=OUTPUT_MODEL_NAME, repo_type="model", exist_ok=True)
|
| 168 |
print(" β
Repository created")
|
| 169 |
except Exception as e:
|
| 170 |
print(f" βΉοΈ Repository may already exist: {e}")
|
|
|
|
| 173 |
print(" Uploading FP16 GGUF...")
|
| 174 |
api.upload_file(
|
| 175 |
path_or_fileobj=gguf_file,
|
| 176 |
+
path_in_repo="qwen-capybara-medium-f16.gguf",
|
| 177 |
+
repo_id=OUTPUT_MODEL_NAME,
|
| 178 |
)
|
| 179 |
print(" β
FP16 uploaded")
|
| 180 |
|
|
|
|
| 183 |
print(f" Uploading {quant_type}...")
|
| 184 |
api.upload_file(
|
| 185 |
path_or_fileobj=quant_file,
|
| 186 |
+
path_in_repo=f"qwen-capybara-medium-{quant_type.lower()}.gguf",
|
| 187 |
+
repo_id=OUTPUT_MODEL_NAME,
|
| 188 |
)
|
| 189 |
print(f" β
{quant_type} uploaded")
|
| 190 |
|
|
|
|
| 200 |
- sft
|
| 201 |
---
|
| 202 |
|
| 203 |
+
# {OUTPUT_MODEL_NAME.split('/')[-1]}
|
| 204 |
|
| 205 |
This is a GGUF conversion of [{ADAPTER_MODEL}](https://huggingface.co/{ADAPTER_MODEL}), which is a LoRA fine-tuned version of [{BASE_MODEL}](https://huggingface.co/{BASE_MODEL}).
|
| 206 |
|
|
|
|
| 215 |
|
| 216 |
| File | Quant | Size | Description | Use Case |
|
| 217 |
|------|-------|------|-------------|----------|
|
| 218 |
+
| qwen-capybara-medium-f16.gguf | F16 | ~1GB | Full precision | Best quality, slower |
|
| 219 |
+
| qwen-capybara-medium-q8_0.gguf | Q8_0 | ~500MB | 8-bit | High quality |
|
| 220 |
+
| qwen-capybara-medium-q5_k_m.gguf | Q5_K_M | ~350MB | 5-bit medium | Good quality, smaller |
|
| 221 |
+
| qwen-capybara-medium-q4_k_m.gguf | Q4_K_M | ~300MB | 4-bit medium | Recommended - good balance |
|
| 222 |
|
| 223 |
## Usage
|
| 224 |
|
|
|
|
| 226 |
|
| 227 |
```bash
|
| 228 |
# Download model
|
| 229 |
+
huggingface-cli download {OUTPUT_MODEL_NAME} qwen-capybara-medium-q4_k_m.gguf
|
| 230 |
|
| 231 |
# Run with llama.cpp
|
| 232 |
+
./llama-cli -m qwen-capybara-medium-q4_k_m.gguf -p "Your prompt here"
|
| 233 |
```
|
| 234 |
|
| 235 |
### With Ollama
|
| 236 |
|
| 237 |
1. Create a `Modelfile`:
|
| 238 |
```
|
| 239 |
+
FROM ./qwen-capybara-medium-q4_k_m.gguf
|
| 240 |
```
|
| 241 |
|
| 242 |
2. Create the model:
|
| 243 |
```bash
|
| 244 |
+
ollama create qwen-capybara -f Modelfile
|
| 245 |
+
ollama run qwen-capybara
|
| 246 |
```
|
| 247 |
|
| 248 |
### With LM Studio
|
|
|
|
| 251 |
2. Import into LM Studio
|
| 252 |
3. Start chatting!
|
| 253 |
|
| 254 |
+
## Training Details
|
| 255 |
+
|
| 256 |
+
This model was fine-tuned using:
|
| 257 |
+
- **Dataset:** trl-lib/Capybara (1,000 examples)
|
| 258 |
+
- **Method:** Supervised Fine-Tuning with LoRA
|
| 259 |
+
- **Epochs:** 3
|
| 260 |
+
- **LoRA rank:** 16
|
| 261 |
+
- **Hardware:** A10G Large GPU
|
| 262 |
+
|
| 263 |
## License
|
| 264 |
|
| 265 |
Inherits the license from the base model: {BASE_MODEL}
|
|
|
|
| 267 |
## Citation
|
| 268 |
|
| 269 |
```bibtex
|
| 270 |
+
@misc{{qwen-capybara-medium-gguf,
|
| 271 |
author = {{{username}}},
|
| 272 |
+
title = {{Qwen Capybara Medium GGUF}},
|
| 273 |
year = {{2025}},
|
| 274 |
publisher = {{Hugging Face}},
|
| 275 |
+
url = {{https://huggingface.co/{OUTPUT_MODEL_NAME}}}
|
| 276 |
}}
|
| 277 |
```
|
| 278 |
|
|
|
|
| 284 |
api.upload_file(
|
| 285 |
path_or_fileobj=readme_content.encode(),
|
| 286 |
path_in_repo="README.md",
|
| 287 |
+
repo_id=OUTPUT_MODEL_NAME,
|
| 288 |
)
|
| 289 |
print(" β
README uploaded")
|
| 290 |
|
| 291 |
print("\n" + "=" * 60)
|
| 292 |
print("β
GGUF Conversion Complete!")
|
| 293 |
+
print(f"π¦ Repository: https://huggingface.co/{OUTPUT_MODEL_NAME}")
|
| 294 |
+
print("\nπ₯ Download with:")
|
| 295 |
+
print(f" huggingface-cli download {OUTPUT_MODEL_NAME} qwen-capybara-medium-q4_k_m.gguf")
|
| 296 |
+
print("\nπ Use with Ollama:")
|
| 297 |
print(" 1. Download the GGUF file")
|
| 298 |
+
print(" 2. Create Modelfile: FROM ./qwen-capybara-medium-q4_k_m.gguf")
|
| 299 |
+
print(" 3. ollama create qwen-capybara -f Modelfile")
|
| 300 |
+
print(" 4. ollama run qwen-capybara")
|
| 301 |
print("=" * 60)
|
trl/scripts/train_dpo_example.py
CHANGED
|
@@ -32,7 +32,7 @@ from trl import DPOTrainer, DPOConfig
|
|
| 32 |
# Initialize Trackio for real-time monitoring
|
| 33 |
trackio.init(
|
| 34 |
project="qwen-dpo-alignment",
|
| 35 |
-
space_id="username/trackio",
|
| 36 |
config={
|
| 37 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 38 |
"dataset": "trl-lib/ultrafeedback_binarized",
|
|
@@ -110,4 +110,4 @@ trainer.push_to_hub()
|
|
| 110 |
trackio.finish()
|
| 111 |
|
| 112 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-dpo-aligned")
|
| 113 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 32 |
# Initialize Trackio for real-time monitoring
|
| 33 |
trackio.init(
|
| 34 |
project="qwen-dpo-alignment",
|
| 35 |
+
space_id="username/my-trackio-dashboard",
|
| 36 |
config={
|
| 37 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 38 |
"dataset": "trl-lib/ultrafeedback_binarized",
|
|
|
|
| 110 |
trackio.finish()
|
| 111 |
|
| 112 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-dpo-aligned")
|
| 113 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
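The same one-line `space_id` change applies to all three training examples. For reference, a minimal Trackio round trip looks roughly like this (project and Space names are placeholders):

```python
import trackio

# init() creates the Space on first use; log() streams metrics to the dashboard
trackio.init(project="my-project", space_id="username/my-trackio-dashboard")
trackio.log({"train/loss": 0.42})
trackio.finish()
```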
trl/scripts/train_grpo_example.py
CHANGED
|
@@ -36,7 +36,7 @@ from trl import GRPOTrainer, GRPOConfig
|
|
| 36 |
# Initialize Trackio for real-time monitoring
|
| 37 |
trackio.init(
|
| 38 |
project="qwen-grpo-math",
|
| 39 |
-
space_id="username/trackio",
|
| 40 |
config={
|
| 41 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 42 |
"dataset": "trl-lib/math_shepherd",
|
|
@@ -94,4 +94,4 @@ trainer.push_to_hub()
|
|
| 94 |
trackio.finish()
|
| 95 |
|
| 96 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-grpo-math")
|
| 97 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 36 |
# Initialize Trackio for real-time monitoring
|
| 37 |
trackio.init(
|
| 38 |
project="qwen-grpo-math",
|
| 39 |
+
space_id="username/my-trackio-dashboard",
|
| 40 |
config={
|
| 41 |
"model": "Qwen/Qwen2.5-0.5B-Instruct",
|
| 42 |
"dataset": "trl-lib/math_shepherd",
|
|
|
|
| 94 |
trackio.finish()
|
| 95 |
|
| 96 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-grpo-math")
|
| 97 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
trl/scripts/train_sft_example.py
CHANGED
|
@@ -39,7 +39,7 @@ from trl import SFTTrainer, SFTConfig
|
|
| 39 |
# Initialize Trackio for real-time monitoring
|
| 40 |
trackio.init(
|
| 41 |
project="qwen-capybara-sft",
|
| 42 |
-
space_id="username/trackio", # Creates Space if it doesn't exist
|
| 43 |
config={
|
| 44 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 45 |
"dataset": "trl-lib/Capybara",
|
|
@@ -124,4 +124,4 @@ trainer.push_to_hub()
|
|
| 124 |
trackio.finish()
|
| 125 |
|
| 126 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-capybara-sft")
|
| 127 |
-
print("π View metrics at: https://huggingface.co/spaces/username/trackio")
|
|
|
|
| 39 |
# Initialize Trackio for real-time monitoring
|
| 40 |
trackio.init(
|
| 41 |
project="qwen-capybara-sft",
|
| 42 |
+
space_id="username/my-trackio-dashboard", # Creates Space if it doesn't exist
|
| 43 |
config={
|
| 44 |
"model": "Qwen/Qwen2.5-0.5B",
|
| 45 |
"dataset": "trl-lib/Capybara",
|
|
|
|
| 124 |
trackio.finish()
|
| 125 |
|
| 126 |
print("β
Complete! Model at: https://huggingface.co/username/qwen-capybara-sft")
|
| 127 |
+
print("π View metrics at: https://huggingface.co/spaces/username/my-trackio-dashboard")
|
trl/scripts/validate_dataset.py
ADDED
|
@@ -0,0 +1,175 @@
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# /// script
|
| 3 |
+
# dependencies = [
|
| 4 |
+
# "datasets>=2.14.0",
|
| 5 |
+
# ]
|
| 6 |
+
# ///
|
| 7 |
+
"""
|
| 8 |
+
Validate dataset format for TRL training.
|
| 9 |
+
|
| 10 |
+
Usage:
|
| 11 |
+
python validate_dataset.py <dataset_name> <method>
|
| 12 |
+
|
| 13 |
+
Examples:
|
| 14 |
+
python validate_dataset.py trl-lib/Capybara sft
|
| 15 |
+
python validate_dataset.py Anthropic/hh-rlhf dpo
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import sys
|
| 19 |
+
from datasets import load_dataset
|
| 20 |
+
|
| 21 |
+
def validate_sft_dataset(dataset):
|
| 22 |
+
"""Validate SFT dataset format."""
|
| 23 |
+
print("π Validating SFT dataset...")
|
| 24 |
+
|
| 25 |
+
# Check for common fields
|
| 26 |
+
columns = dataset.column_names
|
| 27 |
+
print(f"π Columns: {columns}")
|
| 28 |
+
|
| 29 |
+
has_messages = "messages" in columns
|
| 30 |
+
has_text = "text" in columns
|
| 31 |
+
|
| 32 |
+
if not (has_messages or has_text):
|
| 33 |
+
print("β Dataset must have 'messages' or 'text' field")
|
| 34 |
+
return False
|
| 35 |
+
|
| 36 |
+
# Check first example
|
| 37 |
+
example = dataset[0]
|
| 38 |
+
|
| 39 |
+
if has_messages:
|
| 40 |
+
messages = example["messages"]
|
| 41 |
+
if not isinstance(messages, list):
|
| 42 |
+
print("β 'messages' field must be a list")
|
| 43 |
+
return False
|
| 44 |
+
|
| 45 |
+
if len(messages) == 0:
|
| 46 |
+
print("β 'messages' field is empty")
|
| 47 |
+
return False
|
| 48 |
+
|
| 49 |
+
# Check message format
|
| 50 |
+
msg = messages[0]
|
| 51 |
+
if not isinstance(msg, dict):
|
| 52 |
+
print("β Messages must be dictionaries")
|
| 53 |
+
return False
|
| 54 |
+
|
| 55 |
+
if "role" not in msg or "content" not in msg:
|
| 56 |
+
print("β Messages must have 'role' and 'content' keys")
|
| 57 |
+
return False
|
| 58 |
+
|
| 59 |
+
print("β
Messages format valid")
|
| 60 |
+
print(f" First message: {msg['role']}: {msg['content'][:50]}...")
|
| 61 |
+
|
| 62 |
+
if has_text:
|
| 63 |
+
text = example["text"]
|
| 64 |
+
if not isinstance(text, str):
|
| 65 |
+
print("β 'text' field must be a string")
|
| 66 |
+
return False
|
| 67 |
+
|
| 68 |
+
if len(text) == 0:
|
| 69 |
+
print("β 'text' field is empty")
|
| 70 |
+
return False
|
| 71 |
+
|
| 72 |
+
print("β
Text format valid")
|
| 73 |
+
print(f" First text: {text[:100]}...")
|
| 74 |
+
|
| 75 |
+
return True
|
| 76 |
+
|
| 77 |
+
def validate_dpo_dataset(dataset):
|
| 78 |
+
"""Validate DPO dataset format."""
|
| 79 |
+
print("π Validating DPO dataset...")
|
| 80 |
+
|
| 81 |
+
columns = dataset.column_names
|
| 82 |
+
print(f"π Columns: {columns}")
|
| 83 |
+
|
| 84 |
+
required = ["prompt", "chosen", "rejected"]
|
| 85 |
+
missing = [col for col in required if col not in columns]
|
| 86 |
+
|
| 87 |
+
if missing:
|
| 88 |
+
print(f"β Missing required fields: {missing}")
|
| 89 |
+
return False
|
| 90 |
+
|
| 91 |
+
# Check first example
|
| 92 |
+
example = dataset[0]
|
| 93 |
+
|
| 94 |
+
for field in required:
|
| 95 |
+
value = example[field]
|
| 96 |
+
if isinstance(value, str):
|
| 97 |
+
if len(value) == 0:
|
| 98 |
+
print(f"β '{field}' field is empty")
|
| 99 |
+
return False
|
| 100 |
+
print(f"β
'{field}' format valid (string)")
|
| 101 |
+
elif isinstance(value, list):
|
| 102 |
+
if len(value) == 0:
|
| 103 |
+
print(f"β '{field}' field is empty")
|
| 104 |
+
return False
|
| 105 |
+
print(f"β
'{field}' format valid (list of messages)")
|
| 106 |
+
else:
|
| 107 |
+
print(f"β '{field}' must be string or list")
|
| 108 |
+
return False
|
| 109 |
+
|
| 110 |
+
return True
|
| 111 |
+
|
| 112 |
+
def validate_kto_dataset(dataset):
|
| 113 |
+
"""Validate KTO dataset format."""
|
| 114 |
+
print("π Validating KTO dataset...")
|
| 115 |
+
|
| 116 |
+
columns = dataset.column_names
|
| 117 |
+
print(f"π Columns: {columns}")
|
| 118 |
+
|
| 119 |
+
required = ["prompt", "completion", "label"]
|
| 120 |
+
missing = [col for col in required if col not in columns]
|
| 121 |
+
|
| 122 |
+
if missing:
|
| 123 |
+
print(f"β Missing required fields: {missing}")
|
| 124 |
+
return False
|
| 125 |
+
|
| 126 |
+
# Check first example
|
| 127 |
+
example = dataset[0]
|
| 128 |
+
|
| 129 |
+
if not isinstance(example["label"], bool):
|
| 130 |
+
print("β 'label' field must be boolean")
|
| 131 |
+
return False
|
| 132 |
+
|
| 133 |
+
print("β
KTO format valid")
|
| 134 |
+
return True
|
| 135 |
+
|
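# Illustrative records each validator accepts (examples, not from a real dataset):
#   sft: {"messages": [{"role": "user", "content": "Hi"},
#                      {"role": "assistant", "content": "Hello!"}]}
#   dpo: {"prompt": "...", "chosen": "...", "rejected": "..."}
#   kto: {"prompt": "...", "completion": "...", "label": True}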
| 136 |
+
def main():
|
| 137 |
+
if len(sys.argv) != 3:
|
| 138 |
+
print("Usage: python validate_dataset.py <dataset_name> <method>")
|
| 139 |
+
print("Methods: sft, dpo, kto")
|
| 140 |
+
sys.exit(1)
|
| 141 |
+
|
| 142 |
+
dataset_name = sys.argv[1]
|
| 143 |
+
method = sys.argv[2].lower()
|
| 144 |
+
|
| 145 |
+
print(f"π¦ Loading dataset: {dataset_name}")
|
| 146 |
+
try:
|
| 147 |
+
dataset = load_dataset(dataset_name, split="train")
|
| 148 |
+
print(f"β
Dataset loaded: {len(dataset)} examples")
|
| 149 |
+
except Exception as e:
|
| 150 |
+
print(f"β Failed to load dataset: {e}")
|
| 151 |
+
sys.exit(1)
|
| 152 |
+
|
| 153 |
+
validators = {
|
| 154 |
+
"sft": validate_sft_dataset,
|
| 155 |
+
"dpo": validate_dpo_dataset,
|
| 156 |
+
"kto": validate_kto_dataset,
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
if method not in validators:
|
| 160 |
+
print(f"β Unknown method: {method}")
|
| 161 |
+
print(f"Supported methods: {list(validators.keys())}")
|
| 162 |
+
sys.exit(1)
|
| 163 |
+
|
| 164 |
+
validator = validators[method]
|
| 165 |
+
valid = validator(dataset)
|
| 166 |
+
|
| 167 |
+
if valid:
|
| 168 |
+
print(f"\nβ
Dataset is valid for {method.upper()} training")
|
| 169 |
+
sys.exit(0)
|
| 170 |
+
else:
|
| 171 |
+
print(f"\nβ Dataset is NOT valid for {method.upper()} training")
|
| 172 |
+
sys.exit(1)
|
| 173 |
+
|
| 174 |
+
if __name__ == "__main__":
|
| 175 |
+
main()
|
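As a quick smoke test, the validators can also be driven from an in-memory dataset instead of the Hub (a sketch that assumes `validate_dataset.py` is importable from the working directory):

```python
from datasets import Dataset
from validate_dataset import validate_sft_dataset

# Two-turn conversational record in the `messages` format the SFT check expects
demo = Dataset.from_list([
    {"messages": [{"role": "user", "content": "Hi"},
                  {"role": "assistant", "content": "Hello!"}]}
])
assert validate_sft_dataset(demo)  # prints the checks and returns True
```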