Spaces: Running on Zero
File size: 29,031 Bytes
"""
Dataset download and preparation service
Downloads curated datasets from HuggingFace for LoRA training
"""
import os
import logging
from pathlib import Path
from typing import List, Dict, Optional, Callable
import json
from datetime import datetime
logger = logging.getLogger(__name__)
class DatasetService:
"""Service for downloading and preparing training datasets"""
# Dataset configurations (Parquet format only - no loading scripts)
DATASETS = {
'gtzan': {
'name': 'GTZAN Music Genre Dataset',
'type': 'music',
'hf_id': 'lewtun/music_genres_small',
'description': 'Music genre classification dataset (GTZAN-based)',
'size_gb': 1.2
},
'fsd50k': {
'name': 'FSD50K Sound Events',
'type': 'sound_effects',
'hf_id': 'nguyenvulebinh/fsd50k',
'description': 'Freesound Dataset with 51K audio clips and 200 sound classes',
'size_gb': 30.0
},
'librispeech': {
'name': 'LibriSpeech ASR',
'type': 'vocal',
'hf_id': 'openslr/librispeech_asr',
'description': 'LibriSpeech corpus for speech recognition',
'size_gb': 60.0
},
'libritts': {
'name': 'LibriTTS',
'type': 'vocal',
'hf_id': 'cdminix/libritts-aligned',
'description': 'Multi-speaker English audiobook corpus for TTS',
'size_gb': 35.0
},
'audioset_strong': {
'name': 'AudioSet Strong',
'type': 'music',
'hf_id': 'agkphysics/AudioSet',
'description': 'High-quality labeled audio events',
'size_gb': 12.0
},
'esc50': {
'name': 'ESC-50 Environmental Sounds',
'type': 'sound_effects',
'hf_id': 'ashraq/esc50',
'description': 'Environmental sound classification with 2,000 recordings',
'size_gb': 0.6
},
'urbansound8k': {
'name': 'UrbanSound8K',
'type': 'sound_effects',
'hf_id': 'danavery/urbansound8K',
'description': 'Urban sound classification - 8,732 labeled sound excerpts',
'size_gb': 5.6
}
}
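# Note: entries may also carry optional 'config' and 'split' keys. When present,
# download_dataset() and prepare_dataset_for_training() forward them to
# datasets.load_dataset() as name= and split=. A hypothetical entry using them
# (illustrative only, not one of the registered keys above) could look like:
#
# 'librispeech_clean_100': {
#     'name': 'LibriSpeech ASR (clean, 100h)',
#     'type': 'vocal',
#     'hf_id': 'openslr/librispeech_asr',
#     'description': 'Clean 100-hour training subset of LibriSpeech',
#     'size_gb': 6.6,
#     'config': 'clean',      # forwarded as name='clean'
#     'split': 'train.100'    # forwarded as split='train.100'
# },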
def __init__(self, base_dir: str = "training_data"):
"""
Initialize dataset service
Args:
base_dir: Base directory for storing datasets
"""
self.base_dir = Path(base_dir)
self.base_dir.mkdir(parents=True, exist_ok=True)
def import_prepared_dataset(self, zip_path: str) -> Optional[str]:
"""
Import a prepared dataset from a ZIP file
Args:
zip_path: Path to the ZIP file containing dataset
Returns:
Dataset key if successful, None otherwise
"""
try:
import zipfile
import tempfile
# Extract to temporary directory
with tempfile.TemporaryDirectory() as temp_dir:
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(temp_dir)
temp_path = Path(temp_dir)
# Look for dataset_info.json (at root or in subfolder)
dataset_info_file = None
if (temp_path / "dataset_info.json").exists():
dataset_info_file = temp_path / "dataset_info.json"
else:
# Check subfolders
for subfolder in temp_path.iterdir():
if subfolder.is_dir() and (subfolder / "dataset_info.json").exists():
dataset_info_file = subfolder / "dataset_info.json"
temp_path = subfolder
break
if not dataset_info_file:
logger.error("No dataset_info.json found in ZIP file")
return None
# Read dataset info
with open(dataset_info_file, 'r') as f:
dataset_info = json.load(f)
dataset_key = dataset_info.get('dataset_key', 'imported_dataset')
# Check if dataset already exists, add number suffix if needed
dest_path = self.base_dir / dataset_key
counter = 1
original_key = dataset_key
while dest_path.exists():
dataset_key = f"{original_key}_{counter}"
dest_path = self.base_dir / dataset_key
counter += 1
if dataset_key != original_key:
logger.info(f"Dataset '{original_key}' exists, importing as '{dataset_key}'")
dataset_info['dataset_key'] = dataset_key
# Copy entire dataset directory
import shutil
shutil.copytree(temp_path, dest_path)
# Update dataset_info.json with new key if renamed
if dataset_key != original_key:
with open(dest_path / "dataset_info.json", 'w') as f:
json.dump(dataset_info, f, indent=2)
logger.info(f"β
Imported dataset: {dataset_key}")
return dataset_key
except Exception as e:
logger.error(f"Failed to import dataset: {str(e)}", exc_info=True)
return None
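# Expected ZIP layout for import_prepared_dataset() above (illustrative sketch;
# the archive may also wrap everything inside a single subfolder):
#
#   my_dataset.zip
#     dataset_info.json   <- required; only 'dataset_key' is read during import
#     audio/              <- audio files referenced by the metadata, copied as-is
#
# A minimal dataset_info.json (hypothetical values; the extra fields mirror what
# prepare_dataset_for_training() writes and are passed through unchanged):
#
#   {
#     "dataset_key": "my_dataset",
#     "name": "My prepared dataset",
#     "prepared": true,
#     "train_files": ["audio/sample_000000.wav"],
#     "val_files": [],
#     "train_metadata": [],
#     "val_metadata": []
#   }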
def is_dataset_downloaded(self, dataset_key: str) -> bool:
"""
Check if a dataset has already been downloaded
Args:
dataset_key: Key identifying the dataset
Returns:
True if dataset exists and has metadata file, False otherwise
"""
dataset_dir = self.base_dir / dataset_key
metadata_path = dataset_dir / 'dataset_info.json'
return metadata_path.exists()
def get_downloaded_datasets(self) -> Dict[str, Dict]:
"""
Get information about all downloaded datasets
Returns:
Dictionary mapping dataset keys to their metadata
"""
downloaded = {}
for dataset_key in self.DATASETS.keys():
if self.is_dataset_downloaded(dataset_key):
dataset_dir = self.base_dir / dataset_key
metadata_path = dataset_dir / 'dataset_info.json'
try:
with open(metadata_path, 'r') as f:
info = json.load(f)
downloaded[dataset_key] = info
except Exception as e:
logger.warning(f"Failed to load metadata for {dataset_key}: {e}")
return downloaded
def get_user_datasets(self) -> Dict[str, Dict]:
"""Get information about user-uploaded/prepared datasets
Returns:
Dictionary mapping user dataset names to their metadata
"""
user_datasets = {}
# Scan the base directory for dataset folders that are not in the built-in DATASETS registry
if not self.base_dir.exists():
return user_datasets
for dataset_dir in self.base_dir.iterdir():
if not dataset_dir.is_dir():
continue
dataset_key = dataset_dir.name
# Skip HuggingFace datasets (they're in DATASETS dict)
if dataset_key in self.DATASETS:
continue
# Check for dataset_info.json or metadata indicating it's a user dataset
metadata_path = dataset_dir / 'dataset_info.json'
if metadata_path.exists():
try:
with open(metadata_path, 'r') as f:
info = json.load(f)
# Mark as user dataset
info['is_user_dataset'] = True
info['dataset_key'] = dataset_key
user_datasets[dataset_key] = info
except Exception as e:
logger.warning(f"Failed to load metadata for user dataset {dataset_key}: {e}")
return user_datasets
def get_all_available_datasets(self) -> Dict[str, Dict]:
"""Get all available datasets (both HuggingFace and user-uploaded)
Returns:
Dictionary mapping all dataset keys to their metadata
"""
all_datasets = {}
# Get HuggingFace datasets
all_datasets.update(self.get_downloaded_datasets())
# Get user datasets
all_datasets.update(self.get_user_datasets())
return all_datasets
def download_dataset(self, dataset_key: str, progress_callback=None) -> Dict:
"""
Download a dataset from HuggingFace
Args:
dataset_key: Key identifying the dataset (e.g., 'gtzan')
progress_callback: Optional callback for progress updates
Returns:
Dictionary with dataset info and status
"""
try:
if dataset_key not in self.DATASETS:
raise ValueError(f"Unknown dataset: {dataset_key}")
dataset_config = self.DATASETS[dataset_key]
dataset_name = dataset_config['name']
# Check if already downloaded
if self.is_dataset_downloaded(dataset_key):
if progress_callback:
progress_callback(f"β
Dataset already downloaded: {dataset_name}")
progress_callback(f" Use 'Prepare Datasets' section to prepare for training")
# Load and return existing info
dataset_dir = self.base_dir / dataset_key
metadata_path = dataset_dir / 'dataset_info.json'
with open(metadata_path, 'r') as f:
info = json.load(f)
return {
'success': True,
'dataset': dataset_key,
'info': info,
'already_downloaded': True
}
if progress_callback:
progress_callback(f"π¦ Starting download: {dataset_name}")
# Show dataset size info
size_gb = dataset_config.get('size_gb', 0)
if size_gb > 100.0:
progress_callback(f"β οΈ Large dataset: {size_gb:.1f} GB")
progress_callback(f" This may take significant time to download.")
elif size_gb > 10.0:
progress_callback(f"βΉοΈ Dataset size: ~{size_gb:.1f} GB (may take a few minutes)")
else:
progress_callback(f"βΉοΈ Dataset size: ~{size_gb:.1f} GB")
# Check if dataset is available on HuggingFace
if dataset_config['hf_id'] is None:
# Custom download needed
return self._handle_custom_dataset(dataset_key, dataset_config, progress_callback)
# Download from HuggingFace
return self._download_from_huggingface(dataset_key, dataset_config, progress_callback)
except Exception as e:
logger.error(f"Dataset download failed: {e}", exc_info=True)
return {
'success': False,
'error': str(e),
'dataset': dataset_key
}
def _download_from_huggingface(self, dataset_key: str, config: Dict, progress_callback=None) -> Dict:
"""Download dataset from HuggingFace Hub"""
try:
from datasets import load_dataset
hf_id = config['hf_id']
dataset_dir = self.base_dir / dataset_key
dataset_dir.mkdir(parents=True, exist_ok=True)
if progress_callback:
progress_callback(f"π Loading dataset from HuggingFace Hub: {hf_id}")
logger.info(f"Loading dataset: {hf_id}")
# Prepare load_dataset parameters
load_params = {
'path': hf_id,
'cache_dir': str(dataset_dir / "cache")
}
# Add optional config/split parameters
if 'config' in config:
load_params['name'] = config['config']
if 'split' in config:
load_params['split'] = config['split']
# Download dataset
dataset = load_dataset(**load_params)
# Save dataset info for LoRA training compatibility
dataset_info = {
'name': config['name'],
'type': config['type'],
'hf_id': hf_id,
'description': config['description'],
'size_gb': config.get('size_gb', 0),
'splits': list(dataset.keys()) if hasattr(dataset, 'keys') else ['default'],
'num_examples': {split: len(dataset[split]) for split in dataset.keys()} if hasattr(dataset, 'keys') else len(dataset),
'features': str(dataset[list(dataset.keys())[0]].features) if hasattr(dataset, 'keys') else str(dataset.features),
'path': str(dataset_dir),
# Add placeholders for LoRA training service compatibility
'train_files': [],
'val_files': [],
'train_metadata': [],
'val_metadata': [],
'prepared': False, # Indicates dataset needs preparation before training
'hf_dataset': True # Flag that this is a HuggingFace dataset
}
# Save metadata
metadata_path = dataset_dir / 'dataset_info.json'
with open(metadata_path, 'w') as f:
json.dump(dataset_info, f, indent=2)
if progress_callback:
progress_callback(f"β
Downloaded {config['name']}")
if hasattr(dataset, 'keys'):
for split in dataset.keys():
progress_callback(f" {split}: {len(dataset[split]):,} samples")
else:
progress_callback(f" Total: {len(dataset):,} samples")
logger.info(f"Dataset downloaded successfully: {dataset_key}")
return {
'success': True,
'dataset': dataset_key,
'info': dataset_info
}
except ImportError:
error_msg = "HuggingFace datasets library not installed. Install with: pip install datasets"
logger.error(error_msg)
if progress_callback:
progress_callback(f"β {error_msg}")
return {
'success': False,
'error': error_msg,
'dataset': dataset_key
}
except Exception as e:
error_msg = f"Failed to download {config['name']}: {str(e)}"
logger.error(error_msg, exc_info=True)
# Provide helpful error messages
if progress_callback:
progress_callback(f"β {error_msg}")
if "doesn't exist" in str(e).lower() or "not found" in str(e).lower():
progress_callback(f" π‘ Dataset '{hf_id}' not found on HuggingFace Hub")
progress_callback(f" Check: https://huggingface.co/datasets/{hf_id}")
elif "connection" in str(e).lower() or "timeout" in str(e).lower():
progress_callback(f" π‘ Network issue - check your internet connection")
elif "permission" in str(e).lower() or "access" in str(e).lower():
progress_callback(f" π‘ Dataset may require authentication or have access restrictions")
progress_callback(f"β {error_msg}")
return {
'success': False,
'error': error_msg,
'dataset': dataset_key
}
def prepare_dataset_for_training(
self,
dataset_key: str,
train_val_split: float = 0.8,
max_samples: Optional[int] = None,
progress_callback: Optional[Callable] = None
) -> Dict:
"""
Prepare a downloaded HuggingFace dataset for LoRA training.
Extracts audio files, creates metadata, and splits into train/val sets.
Args:
dataset_key: Key identifying the dataset (e.g., 'gtzan')
train_val_split: Fraction of data to use for training (default: 0.8)
max_samples: Maximum number of samples to prepare (None = all)
progress_callback: Optional callback for progress updates
Returns:
Dictionary with preparation results
"""
try:
import soundfile as sf
if progress_callback:
progress_callback(f"π§ Preparing dataset: {dataset_key}")
# Check if dataset exists
if dataset_key not in self.DATASETS:
raise ValueError(f"Unknown dataset: {dataset_key}")
config = self.DATASETS[dataset_key]
dataset_dir = self.base_dir / dataset_key
cache_dir = dataset_dir / "cache"
audio_dir = dataset_dir / "audio"
audio_dir.mkdir(parents=True, exist_ok=True)
# Load dataset info
metadata_path = dataset_dir / 'dataset_info.json'
if not metadata_path.exists():
raise ValueError(f"Dataset not downloaded yet. Please download {dataset_key} first.")
with open(metadata_path, 'r') as f:
dataset_info = json.load(f)
if dataset_info.get('prepared'):
if progress_callback:
progress_callback(f"β
Dataset already prepared!")
return {'success': True, 'dataset': dataset_key, 'already_prepared': True}
# Load HuggingFace dataset from cache
if progress_callback:
progress_callback(f"π Loading dataset from cache...")
from datasets import load_dataset
import librosa
hf_id = config['hf_id']
# Reload the dataset from the local cache; automatic audio decoding (and its
# torchcodec dependency) is avoided later by reading the raw Arrow rows directly
load_params = {
'path': hf_id,
'cache_dir': str(cache_dir),
}
if 'config' in config:
load_params['name'] = config['config']
if 'split' in config:
load_params['split'] = config['split']
dataset = load_dataset(**load_params)
# Get the appropriate split
if hasattr(dataset, 'keys'):
# Use 'train' split if available, otherwise first available split
split_name = 'train' if 'train' in dataset.keys() else list(dataset.keys())[0]
data = dataset[split_name]
else:
data = dataset
# Determine which column holds the audio data
audio_column = None
for col in ['audio', 'file', 'path', 'wav']:
if col in data.column_names:
audio_column = col
break
if not audio_column:
raise ValueError(f"Could not find audio column in dataset. Available columns: {data.column_names}")
if progress_callback:
progress_callback(f"π Found audio column: '{audio_column}'")
total_samples = len(data)
if max_samples:
total_samples = min(total_samples, max_samples)
if progress_callback:
progress_callback(f"π Processing {total_samples} samples...")
# Process samples
train_files = []
val_files = []
train_metadata = []
val_metadata = []
num_train = int(total_samples * train_val_split)
for idx in range(total_samples):
try:
# Read the row straight from the underlying Arrow table so the Audio
# feature's decoder (and its torchcodec dependency) is never invoked
sample_data = data._data.table.slice(idx, 1).to_pydict()
# Get the audio column data
audio_data = sample_data[audio_column][0] if audio_column in sample_data else None
if audio_data is None:
logger.warning(f"No audio data for sample {idx}")
continue
# The audio column in Parquet datasets contains file paths or bytes
audio_path_to_load = None
if isinstance(audio_data, dict):
# Check for 'path' key which contains the cached file path
if 'path' in audio_data and audio_data['path']:
audio_path_to_load = audio_data['path']
elif 'bytes' in audio_data and audio_data['bytes']:
# Write bytes to temp file and load
import tempfile
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
tmp.write(audio_data['bytes'])
audio_path_to_load = tmp.name
elif isinstance(audio_data, str):
# Direct file path
audio_path_to_load = audio_data
if not audio_path_to_load:
logger.warning(f"Could not find audio path for sample {idx}: {type(audio_data)}")
continue
# Load audio with librosa (no torchcodec needed)
audio_array, sample_rate = librosa.load(audio_path_to_load, sr=None)
# Save audio file
audio_filename = f"sample_{idx:06d}.wav"
audio_path = audio_dir / audio_filename
sf.write(audio_path, audio_array, sample_rate)
# Create metadata
metadata = {
'audio_file': str(audio_path),
'sample_rate': sample_rate,
'duration': len(audio_array) / sample_rate,
'dataset': dataset_key,
'index': idx
}
# Extract additional metadata from dataset
for key in sample_data.keys():
if key != audio_column and sample_data[key]:
value = sample_data[key][0]
if not isinstance(value, (dict, list)):
metadata[key] = value
# Add to train or val set (split is sequential by index; samples are not shuffled)
if idx < num_train:
train_files.append(str(audio_path))
train_metadata.append(metadata)
else:
val_files.append(str(audio_path))
val_metadata.append(metadata)
# Progress update
if progress_callback and (idx + 1) % 50 == 0:
progress_callback(f" Processed {idx + 1}/{total_samples} samples...")
except Exception as e:
logger.warning(f"Error processing sample {idx}: {str(e)}")
continue
# Update dataset_info.json with training-ready format
dataset_info.update({
'train_files': train_files,
'val_files': val_files,
'train_metadata': train_metadata,
'val_metadata': val_metadata,
'prepared': True,
'preparation_date': datetime.now().isoformat(),
'num_train_samples': len(train_files),
'num_val_samples': len(val_files),
'train_val_split': train_val_split
})
# Save updated metadata
with open(metadata_path, 'w') as f:
json.dump(dataset_info, f, indent=2)
if progress_callback:
progress_callback(f"β
Dataset prepared successfully!")
progress_callback(f" Training samples: {len(train_files)}")
progress_callback(f" Validation samples: {len(val_files)}")
progress_callback(f" Audio files saved to: {audio_dir}")
logger.info(f"Dataset {dataset_key} prepared: {len(train_files)} train, {len(val_files)} val")
return {
'success': True,
'dataset': dataset_key,
'num_train': len(train_files),
'num_val': len(val_files),
'audio_dir': str(audio_dir)
}
except Exception as e:
error_msg = f"Failed to prepare dataset {dataset_key}: {str(e)}"
logger.error(error_msg, exc_info=True)
if progress_callback:
progress_callback(f"β {error_msg}")
return {
'success': False,
'error': error_msg,
'dataset': dataset_key
}
def _handle_custom_dataset(self, dataset_key: str, config: Dict, progress_callback=None) -> Dict:
"""Handle datasets that require custom download"""
if progress_callback:
progress_callback(
f"β οΈ {config['name']} requires manual download\n"
f" Visit: {config.get('custom_url', 'N/A')}\n"
f" Place files in: training_data/{dataset_key}/"
)
return {
'success': False,
'manual_download_required': True,
'dataset': dataset_key,
'url': config.get('custom_url'),
'info': config
}
def list_available_datasets(self) -> Dict[str, Dict]:
"""List all available datasets and their configurations"""
return self.DATASETS
def get_downloaded_dataset_keys(self) -> List[str]:
"""Get list of already downloaded dataset keys (simple list)"""
downloaded = []
for dataset_key in self.DATASETS.keys():
dataset_dir = self.base_dir / dataset_key
metadata_path = dataset_dir / 'dataset_info.json'
if metadata_path.exists():
downloaded.append(dataset_key)
return downloaded
def prepare_for_training(self, dataset_key: str) -> Dict:
"""
Prepare downloaded dataset for LoRA training
Args:
dataset_key: Dataset to prepare
Returns:
Dictionary with prepared dataset info
"""
try:
dataset_dir = self.base_dir / dataset_key
metadata_path = dataset_dir / 'dataset_info.json'
if not metadata_path.exists():
raise ValueError(f"Dataset not downloaded: {dataset_key}")
with open(metadata_path) as f:
dataset_info = json.load(f)
# Create prepared dataset directory
prepared_dir = dataset_dir / "prepared"
prepared_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"Dataset {dataset_key} ready for training")
return {
'success': True,
'dataset': dataset_key,
'path': str(prepared_dir),
'info': dataset_info
}
except Exception as e:
logger.error(f"Dataset preparation failed: {e}")
return {
'success': False,
'error': str(e),
'dataset': dataset_key
}
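# --- Usage sketch -------------------------------------------------------------
# A minimal, illustrative driver showing how DatasetService is meant to be used.
# It is not exercised by the app itself, and running it for real will download
# data from the HuggingFace Hub (it also assumes the optional audio dependencies
# - datasets, librosa, soundfile - are installed).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    service = DatasetService(base_dir="training_data")

    # Built-in registry vs. what is already on disk
    print("Available:", list(service.list_available_datasets().keys()))
    print("Downloaded:", service.get_downloaded_dataset_keys())

    # Example end-to-end flow for one small dataset (ESC-50, ~0.6 GB)
    result = service.download_dataset("esc50", progress_callback=print)
    if result.get("success"):
        prepared = service.prepare_dataset_for_training(
            "esc50",
            train_val_split=0.8,
            max_samples=100,  # keep the demo small
            progress_callback=print,
        )
        print(prepared)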