#!/usr/bin/env python3 """ Eurus-2-RL-Data (Code) to VERL converter Filters code problems from PRIME-RL/Eurus-2-RL-Data Applies best practices from Big-Math-RL-Verified conversion: - Multi-file output for memory efficiency - Streaming with ParquetWriter - Aggressive cache cleanup - Progress tracking and resume capability """ import pyarrow.parquet as pq import pyarrow as pa from datasets import load_dataset from pathlib import Path import json import shutil import time from datetime import datetime # Configuration REPO_ID = "PRIME-RL/Eurus-2-RL-Data" OUTPUT_DIR = "data" PROGRESS_FILE = "progress.json" DATA_SOURCE_PREFIX = "Eurus-2-Code" ABILITY_FILTER = "code" # Filter for code problems only BATCH_SIZE = 10000 ROWS_PER_OUTPUT_FILE = 100000 # 100K rows per file CHECKPOINT_INTERVAL = 1 # Save progress after each file # Global state current_output_file_idx = 0 current_output_rows = 0 current_writer = None current_output_path = None def load_progress(): """Load progress from checkpoint""" if Path(PROGRESS_FILE).exists(): with open(PROGRESS_FILE, 'r') as f: return json.load(f) return { "rows_processed": 0, "total_rows": 0, "current_output_file_idx": 0, "current_output_rows": 0, "completed": False } def save_progress(progress): """Save progress checkpoint""" progress["timestamp"] = datetime.now().isoformat() with open(PROGRESS_FILE, 'w') as f: json.dump(progress, f, indent=2) def check_disk_space(): """Monitor disk space""" usage = shutil.disk_usage("/Users/sungyubkim") free_gb = usage.free / (1024**3) if free_gb < 5: print(f"āš ļø WARNING: Low disk space: {free_gb:.1f} GB remaining") return free_gb def get_output_filename(file_idx, total_files=None): """Generate output filename with proper padding""" if total_files is None: # Estimate: 25K rows / 100K per file = ~1 file total_files = 5 return f"{OUTPUT_DIR}/train-{file_idx:05d}-of-{total_files:05d}.parquet" def get_verl_schema(): """Define the VERL schema (same as source, already in VERL format)""" return pa.schema([ ('data_source', pa.string()), ('prompt', pa.list_(pa.struct([ ('role', pa.string()), ('content', pa.string()) ]))), ('ability', pa.string()), ('reward_model', pa.struct([ ('style', pa.string()), ('ground_truth', pa.string()) ])), ('extra_info', pa.struct([ ('split', pa.string()), ('index', pa.int64()) ])) ]) def init_new_output_file(file_idx): """Initialize a new output file with ParquetWriter""" global current_writer, current_output_path # Close previous writer if exists if current_writer is not None: current_writer.close() print(f" āœ… Closed output file: {current_output_path}") # Create new output file current_output_path = get_output_filename(file_idx) schema = get_verl_schema() current_writer = pq.ParquetWriter(current_output_path, schema) print(f" šŸ“ Created new output file: {current_output_path}") return current_writer def convert_example_to_verl(example, global_index): """Convert a single example to VERL format (minimal changes needed)""" # The data is already in VERL format, just ensure consistency return { 'data_source': example['data_source'], 'prompt': example['prompt'], 'ability': example['ability'], 'reward_model': example['reward_model'], 'extra_info': { 'split': example['extra_info'].get('split', 'train'), 'index': global_index } } def write_batch_to_output(batch_data): """Write a batch to the current output file""" global current_writer, current_output_rows, current_output_file_idx # Check if we need to start a new output file if current_output_rows >= ROWS_PER_OUTPUT_FILE or current_writer is None: init_new_output_file(current_output_file_idx) current_output_file_idx += 1 current_output_rows = 0 # Convert batch to PyArrow table table = pa.table({ 'data_source': [x['data_source'] for x in batch_data], 'prompt': [x['prompt'] for x in batch_data], 'ability': [x['ability'] for x in batch_data], 'reward_model': [x['reward_model'] for x in batch_data], 'extra_info': [x['extra_info'] for x in batch_data] }, schema=get_verl_schema()) # Write to current file current_writer.write_table(table) current_output_rows += len(table) def process_dataset(start_index=0): """Process the Eurus-2 dataset and filter code problems""" print(f"\nšŸ“‚ Loading dataset from HuggingFace...") # Load dataset in streaming mode dataset = load_dataset(REPO_ID, split='train', streaming=True) print(f" āœ… Dataset loaded in streaming mode") print(f" šŸ” Filtering for ability='{ABILITY_FILTER}'") batch = [] global_index = start_index rows_processed = 0 rows_written = 0 print(f"\n šŸ”„ Processing and filtering...") for example in dataset: rows_processed += 1 # Filter for code problems only if example['ability'] == ABILITY_FILTER: verl_example = convert_example_to_verl(example, global_index) batch.append(verl_example) global_index += 1 # Write batch when it reaches BATCH_SIZE if len(batch) >= BATCH_SIZE: write_batch_to_output(batch) rows_written += len(batch) batch = [] if rows_written % 10000 == 0: print(f" ... {rows_written:,} code rows written ({rows_processed:,} total processed)") # Write remaining batch if batch: write_batch_to_output(batch) rows_written += len(batch) print(f" āœ… Complete: {rows_written:,} code rows written from {rows_processed:,} total rows") return rows_written, rows_processed def finalize_output_files(): """Close all writers and rename files with correct total count""" global current_writer, current_output_file_idx # Close final writer if current_writer is not None: current_writer.close() print(f"\n āœ… Closed final output file") # Get actual number of output files output_files = sorted(Path(OUTPUT_DIR).glob("train-*-of-*.parquet")) actual_count = len(output_files) print(f"\nšŸ“ Finalizing {actual_count} output files...") # Rename files with correct total count for idx, old_path in enumerate(output_files): new_name = f"train-{idx:05d}-of-{actual_count:05d}.parquet" new_path = old_path.parent / new_name if old_path != new_path: old_path.rename(new_path) print(f" Renamed: {old_path.name} -> {new_name}") def main(): global current_output_file_idx, current_output_rows print("=" * 80) print("Eurus-2-RL-Data (Code) Conversion to VERL Format") print("=" * 80) # Load progress progress = load_progress() if progress.get("completed", False): print(f"\nšŸ“„ Already processed! Skipping conversion.") print(f" Total code rows: {progress.get('total_rows', 0):,}") return # Ensure output directory exists Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True) # Check disk space free_gb = check_disk_space() print(f"\nšŸ’¾ Disk space: {free_gb:.1f} GB free") # Process dataset start_time = time.time() print(f"\nšŸš€ Starting conversion...") print(f" Source: {REPO_ID}") print(f" Filter: ability='{ABILITY_FILTER}'") print(f" Output: Multiple files, {ROWS_PER_OUTPUT_FILE:,} rows each") print() start_index = progress.get("total_rows", 0) rows_written, rows_processed = process_dataset(start_index) if rows_written > 0: # Finalize output files finalize_output_files() # Final save progress = { "rows_processed": rows_processed, "total_rows": rows_written, "current_output_file_idx": current_output_file_idx, "completed": True, "status": "complete" } save_progress(progress) # Final statistics elapsed = time.time() - start_time print("\n" + "=" * 80) print("āœ… CONVERSION COMPLETE!") print("=" * 80) print(f"šŸ“Š Statistics:") print(f" Total input rows processed: {rows_processed:,}") print(f" Total code rows written: {rows_written:,}") print(f" Filter rate: {rows_written/rows_processed*100:.1f}%") print(f" Time elapsed: {elapsed/60:.1f} minutes ({elapsed/3600:.2f} hours)") print(f" Output directory: {OUTPUT_DIR}") # Check output files output_files = sorted(Path(OUTPUT_DIR).glob("train-*-of-*.parquet")) if output_files: total_size = sum(f.stat().st_size for f in output_files) / (1024**3) print(f" Output files: {len(output_files)}") print(f" Total size: {total_size:.2f} GB") # Validate first file print(f"\nšŸ” Validation (first file):") table = pq.read_table(output_files[0]) print(f" Rows in first file: {len(table):,}") print(f" Columns: {table.column_names}") # Check sample if len(table) > 0: print(f" Sample data_source: {table['data_source'][0].as_py()}") print(f" Sample ability: {table['ability'][0].as_py()}") print("\n" + "=" * 80) if __name__ == "__main__": main()