```yaml
lora_config:
  bias: none
  lora_alpha: 64
  lora_dropout: 0.05
  r: 32
  target_modules:
  - q_proj
  - v_proj
num_workers: 4
pretrained_model: openai/whisper-large-v3
train:
  huggingface_load:
  - name: audio_1h
    path: evie-8/kinyarwanda-speech-hackathon
    split: train
training_args:
  eval_steps: 200
  eval_strategy: steps
  fp16: true
  generation_max_length: 200
  gradient_accumulation_steps: 2
  gradient_checkpointing: true
  gradient_checkpointing_kwargs:
    use_reentrant: false
  greater_is_better: false
  hub_model_id: patrickcmd/whisper-large-v3-kin-1h
  learning_rate: 1.0e-05
  load_best_model_at_end: true
  logging_steps: 200
  max_steps: 10000
  metric_for_best_model: loss
  per_device_eval_batch_size: 8
  per_device_train_batch_size: 8
  predict_with_generate: true
  push_to_hub: true
  save_steps: 1000
  save_total_limit: 2
  warmup_steps: 100
use_peft: false
validation:
  huggingface_load:
  - path: jq/kinyarwanda-speech-hackathon
    split: dev_test[:300]
  source:
    language:
    - kin
    preprocessing:
    - set_sample_rate:
        rate: 16000
    type: speech
  target:
    language:
    - kin
    preprocessing:
    - lower_case
    type: text
```
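As a rough sketch of how a config like this might be consumed, the snippet below hydrates the `lora_config` and `training_args` sections into `peft.LoraConfig` and `transformers.Seq2SeqTrainingArguments`. The filename `config.yaml` and the `output_dir` value are placeholders, and the project's actual trainer script is not shown here. Note that with `use_peft: false` the LoRA settings are effectively inert, so this run fine-tunes the full whisper-large-v3 model.

```python
import yaml
from peft import LoraConfig
from transformers import Seq2SeqTrainingArguments

# "config.yaml" is a placeholder name for the file above.
with open("config.yaml") as f:
    config = yaml.safe_load(f)

# With use_peft: false this adapter config is built but unused; flipping
# the flag would train LoRA adapters on q_proj/v_proj only (r=32, alpha=64).
lora_config = LoraConfig(**config["lora_config"]) if config["use_peft"] else None

# The eval_strategy key (vs. the older evaluation_strategy) requires a
# recent transformers release (>= 4.41).
training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-large-v3-kin-1h",  # placeholder; not in the config
    **config["training_args"],
)
```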
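The dataset sections map onto the `datasets` library in the obvious way. A sketch follows, assuming the audio and transcript columns are named `audio` and `text` (the config does not say, so these names are guesses); the `source`/`target` preprocessing shown above resamples speech to 16 kHz, which is the rate Whisper expects, and lower-cases the reference text.

```python
from datasets import Audio, load_dataset

# Training split: the 1-hour Kinyarwanda audio subset.
train_spec = config["train"]["huggingface_load"][0]
train_ds = load_dataset(train_spec["path"], train_spec["name"],
                        split=train_spec["split"])

# Validation split: first 300 examples of dev_test.
val_spec = config["validation"]["huggingface_load"][0]
val_ds = load_dataset(val_spec["path"], split=val_spec["split"])

# source preprocessing: set_sample_rate -> resample audio to 16 kHz
val_ds = val_ds.cast_column("audio", Audio(sampling_rate=16_000))

# target preprocessing: lower_case -> normalise the transcripts
val_ds = val_ds.map(lambda ex: {"text": ex["text"].lower()})
```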