{
  "model": "/root/model/Qwen/Qwen2.5-VL-7B-Instruct",
  "model_type": "qwen2_5_vl",
  "model_revision": null,
  "task_type": "causal_lm",
  "torch_dtype": "bfloat16",
  "attn_impl": null,
  "num_labels": null,
  "problem_type": null,
  "rope_scaling": null,
  "device_map": null,
  "max_memory": {},
  "local_repo_path": null,
  "template": "qwen2_5_vl",
  "system": "Please carefully observe the image, thoroughly understand the conditions provided in the question, use logical reasoning to arrive at the result, and reflect on and verify the reasoning process to ensure the accuracy of the answer. Finally, provide the correct answer.",
  "max_length": null,
  "truncation_strategy": "left",
  "max_pixels": null,
  "tools_prompt": "react_en",
  "norm_bbox": null,
  "response_prefix": null,
  "padding_side": "right",
  "loss_scale": "last_round",
  "sequence_parallel_size": 1,
  "use_chat_template": true,
  "template_backend": "swift",
  "dataset": [
    "/mnt/data/user/zhao_jun/tangjixin/sample_data/games_new_v20_5k.json"
  ],
  "val_dataset": [],
  "split_dataset_ratio": 0.01,
  "data_seed": 42,
  "dataset_num_proc": 4,
  "streaming": false,
  "enable_cache": false,
  "download_mode": "reuse_dataset_if_exists",
  "columns": {},
  "strict": false,
  "remove_unused_columns": false,
  "model_name": [
    null,
    null
  ],
  "model_author": [
    null,
    null
  ],
  "custom_dataset_info": [],
  "quant_method": null,
  "quant_bits": null,
  "hqq_axis": null,
  "bnb_4bit_compute_dtype": "bfloat16",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": true,
  "bnb_4bit_quant_storage": null,
  "max_new_tokens": 64,
  "temperature": 1.0,
  "top_k": 50,
  "top_p": 0.85,
  "repetition_penalty": 1.0,
  "num_beams": 1,
  "stream": false,
  "stop_words": [],
  "logprobs": false,
  "top_logprobs": null,
  "ckpt_dir": null,
  "load_dataset_config": null,
  "lora_modules": [],
  "tuner_backend": "peft",
  "train_type": "full",
  "adapters": [],
  "external_plugins": [
    "/mnt/data/user/zhao_jun/tangjixin/ms-swift/examples/train/grpo/plugin/plugin.py"
  ],
  "seed": 42,
  "model_kwargs": {},
  "load_args": false,
  "load_data_args": false,
  "use_hf": false,
  "hub_token": null,
  "custom_register_path": [],
  "ignore_args_error": false,
  "use_swift_lora": false,
  "output_dir": "/mnt/data/user/zhao_jun/tangjixin/output/model/qwen2.5vl-7b-grpo_new_v20_5k/v13-20250325-021847",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": false,
  "do_predict": false,
  "eval_strategy": "steps",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 3,
  "per_device_eval_batch_size": 3,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 2,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "torch_empty_cache_steps": null,
  "learning_rate": 2e-07,
  "weight_decay": 0.1,
  "adam_beta1": 0.9,
  "adam_beta2": 0.95,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 1.0,
  "max_steps": -1,
  "lr_scheduler_type": "constant_with_warmup",
  "lr_scheduler_kwargs": null,
  "warmup_ratio": 0.05,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "/mnt/data/user/zhao_jun/tangjixin/output/model/qwen2.5vl-7b-grpo_new_v20_5k/v13-20250325-021847/runs",
  "logging_strategy": "steps",
  "logging_first_step": true,
  "logging_steps": 5,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 250.0,
  "save_total_limit": -1,
  "save_safetensors": true,
  "save_on_each_node": false,
  "save_only_model": false,
  "restore_callback_states_from_checkpoint": false,
  "no_cuda": false,
  "use_cpu": false,
  "use_mps_device": false,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": true,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": null,
  "dataloader_drop_last": false,
  "eval_steps": 250.0,
  "dataloader_num_workers": 4,
  "dataloader_prefetch_factor": null,
  "past_index": -1,
  "run_name": null,
  "disable_tqdm": null,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": "reward",
  "greater_is_better": true,
  "ignore_data_skip": false,
  "fsdp": "",
  "fsdp_min_num_params": 0,
  "fsdp_config": null,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "accelerator_config": {
    "dispatch_batches": false
  },
  "deepspeed": {
    "fp16": {
      "enabled": "auto",
      "loss_scale": 0,
      "loss_scale_window": 1000,
      "initial_scale_power": 16,
      "hysteresis": 2,
      "min_loss_scale": 1
    },
    "bf16": {
      "enabled": "auto"
    },
    "zero_optimization": {
      "stage": 3,
      "offload_optimizer": {
        "device": "none",
        "pin_memory": true
      },
      "offload_param": {
        "device": "none",
        "pin_memory": true
      },
      "overlap_comm": false,
      "contiguous_gradients": true,
      "sub_group_size": 1000000000.0,
      "reduce_bucket_size": "auto",
      "zero_quantized_weights": false,
      "zero_quantized_gradients": false,
      "stage3_prefetch_bucket_size": 0,
      "stage3_param_persistence_threshold": "auto",
      "stage3_max_live_parameters": 1000000000.0,
      "stage3_max_reuse_distance": 1000000000.0,
      "stage3_gather_16bit_weights_on_model_save": true
    },
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
  },
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "wandb"
  ],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_private_repo": null,
  "hub_always_push": false,
  "gradient_checkpointing": true,
  "gradient_checkpointing_kwargs": null,
  "include_inputs_for_metrics": false,
  "include_for_metrics": [],
  "eval_do_concat_batches": true,
  "fp16_backend": "auto",
  "evaluation_strategy": "steps",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "dispatch_batches": null,
  "split_batches": null,
  "include_tokens_per_second": false,
  "include_num_input_tokens_seen": false,
  "neftune_noise_alpha": null,
  "optim_target_modules": null,
  "batch_eval_metrics": false,
  "eval_on_start": false,
  "use_liger_kernel": false,
  "eval_use_gather_object": false,
  "average_tokens_across_devices": false,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "generation_config": null,
  "check_model": true,
  "acc_strategy": "token",
  "train_sampler_random": true,
  "metric_warmup_step": 0,
  "fsdp_num": 1,
  "acc_steps": 1,
  "eval_use_evalscope": false,
  "eval_datasets": [],
  "eval_limit": null,
  "eval_datasets_args": null,
  "eval_generation_config": null,
  "freeze_parameters": [
    "visual",
    "visual.merger"
  ],
  "freeze_parameters_ratio": 0.0,
  "trainable_parameters": [],
  "freeze_llm": false,
  "freeze_vit": true,
  "freeze_aligner": true,
  "target_modules": [
    "all-linear"
  ],
  "target_regex": null,
  "modules_to_save": [],
  "lora_rank": 8,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "lora_bias": "none",
  "lora_dtype": null,
  "lorap_lr_ratio": null,
  "use_rslora": false,
  "use_dora": false,
  "lora_ga_batch_size": 2,
  "lora_ga_iters": 2,
  "lora_ga_max_length": 1024,
  "lora_ga_direction": "ArB2r",
  "lora_ga_scale": "stable",
  "lora_ga_stable_gamma": 16,
  "init_weights": true,
  "fourier_n_frequency": 2000,
  "fourier_scaling": 300.0,
  "boft_block_size": 4,
  "boft_block_num": 0,
  "boft_n_butterfly_factor": 1,
  "boft_dropout": 0.0,
  "vera_rank": 256,
  "vera_projection_prng_key": 0,
  "vera_dropout": 0.0,
  "vera_d_initial": 0.1,
  "adapter_act": "gelu",
  "adapter_length": 128,
  "use_galore": false,
  "galore_target_modules": null,
  "galore_rank": 128,
  "galore_update_proj_gap": 50,
  "galore_scale": 1.0,
  "galore_proj_type": "std",
  "galore_optim_per_parameter": false,
  "galore_with_embedding": false,
  "galore_quantization": false,
  "galore_proj_quant": false,
  "galore_proj_bits": 4,
  "galore_proj_group_size": 256,
  "galore_cos_threshold": 0.4,
  "galore_gamma_proj": 2,
  "galore_queue_size": 5,
  "adalora_target_r": 8,
  "adalora_init_r": 12,
  "adalora_tinit": 0,
  "adalora_tfinal": 0,
  "adalora_deltaT": 1,
  "adalora_beta1": 0.85,
  "adalora_beta2": 0.85,
  "adalora_orth_reg_weight": 0.5,
  "llamapro_num_new_blocks": 4,
  "llamapro_num_groups": null,
  "lisa_activated_layers": 0,
  "lisa_step_interval": 20,
  "reft_layer_key": null,
  "reft_layers": null,
  "reft_rank": 4,
  "reft_intervention_type": "LoreftIntervention",
  "reft_args": null,
  "use_liger": false,
  "swanlab_token": null,
  "swanlab_project": null,
  "swanlab_workspace": null,
  "swanlab_exp_name": null,
  "swanlab_mode": "cloud",
  "add_version": true,
  "resume_only_model": false,
  "create_checkpoint_symlink": false,
  "packing": false,
  "lazy_tokenize": true,
  "loss_type": null,
  "optimizer": null,
  "metric": null,
  "zero_hpz_partition_size": null,
  "reward_model": null,
  "reward_adapters": [],
  "reward_model_type": null,
  "reward_model_revision": null,
  "num_ppo_epochs": 4,
  "whiten_rewards": false,
  "kl_coef": 0.05,
  "cliprange": 0.2,
  "vf_coef": 0.1,
  "cliprange_value": 0.2,
  "gamma": 1.0,
  "lam": 0.95,
  "num_mini_batches": 1,
  "local_rollout_forward_batch_size": 64,
  "num_sample_generations": 10,
  "response_length": 512,
  "missing_eos_penalty": null,
  "num_infer_workers": 1,
  "vllm_max_num_seqs": 256,
  "vllm_enforce_eager": false,
  "vllm_limit_mm_per_prompt": null,
  "vllm_enable_prefix_caching": true,
  "cosine_min_len_value_wrong": 0.0,
  "cosine_max_len_value_wrong": -0.5,
  "cosine_min_len_value_correct": 1.0,
  "cosine_max_len_value_correct": 0.5,
  "cosine_max_len": null,
  "repetition_n_grams": 3,
  "repetition_max_penalty": -1.0,
  "use_lmdeploy": false,
  "lmdeploy_device": "auto",
  "lmdeploy_session_len": null,
  "lmdeploy_cache_max_entry_count": 0.8,
  "async_generate": true,
  "tensor_parallel_size": 1,
  "sleep_level": 0,
  "move_model_batches": null,
  "offload_optimizer": false,
  "offload_model": false,
  "gc_collect_after_offload": false,
  "multi_turn_func": null,
  "mini_batch_size": null,
  "num_generations": 12,
  "max_completion_length": 2048,
  "ds3_gather_for_generation": true,
  "reward_funcs": [
    "external_r1v_acc"
  ],
  "reward_weights": null,
  "log_completions": true,
  "use_vllm": true,
  "vllm_device": [
    "auto"
  ],
  "vllm_gpu_memory_utilization": 0.4,
  "vllm_max_model_len": 4096,
  "num_iterations": 1,
  "epsilon": 0.2,
  "rlhf_type": "grpo",
  "ref_model": "/root/model/Qwen/Qwen2.5-VL-7B-Instruct",
  "ref_model_type": "qwen2_5_vl",
  "ref_model_revision": null,
  "beta": 0.04,
  "label_smoothing": 0,
  "rpo_alpha": 1.0,
  "cpo_alpha": 1.0,
  "simpo_gamma": 1,
  "desirable_weight": 1.0,
  "undesirable_weight": 1.0,
  "rank": 0,
  "global_world_size": 4,
  "local_world_size": 4,
  "model_suffix": "Qwen2.5-VL-7B-Instruct",
  "model_info": "ModelInfo(model_type='qwen2_5_vl', model_dir='/root/model/Qwen/Qwen2.5-VL-7B-Instruct', torch_dtype=torch.bfloat16, max_model_len=128000, quant_method=None, quant_bits=None, rope_scaling={'type': 'mrope', 'mrope_section': [16, 24, 24]}, config=None, task_type='causal_lm', num_labels=None)",
  "model_meta": "ModelMeta(model_type='qwen2_5_vl', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-VL-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_vl', get_function=<function get_model_tokenizer_qwen2_5_vl at 0x7ff25a6431c0>, model_arch='qwen2_vl', architectures=['Qwen2_5_VLForConditionalGeneration'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=['*.zip', '*.gguf', '*.pth', '*.pt', 'consolidated*', 'onnx/*', '*.safetensors.md', '*.msgpack', '*.onnx', '*.ot', '*.h5', '*.bin', '*.safetensors'], requires=['transformers>=4.49', 'qwen_vl_utils>=0.0.6', 'decord'], tags=[])",
  "model_dir": "/root/model/Qwen/Qwen2.5-VL-7B-Instruct",
  "hub": "<class 'swift.hub.hub.MSHub'>",
| "training_args": "GRPOConfig(output_dir='/mnt/data/user/zhao_jun/tangjixin/output/model/qwen2.5vl-7b-grpo_new_v20_5k/v13-20250325-021847', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=3, per_device_eval_batch_size=3, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-07, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=-1, lr_scheduler_type=<SchedulerType.CONSTANT_WITH_WARMUP: 'constant_with_warmup'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/user/zhao_jun/tangjixin/output/model/qwen2.5vl-7b-grpo_new_v20_5k/v13-20250325-021847/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=250, save_total_limit=-1, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=250, dataloader_num_workers=4, dataloader_prefetch_factor=None, past_index=-1, run_name='/mnt/data/user/zhao_jun/tangjixin/output/model/qwen2.5vl-7b-grpo_new_v20_5k/v13-20250325-021847', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='reward', greater_is_better=True, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', 
report_to=['wandb'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, model_init_kwargs=None, max_prompt_length=512, num_generations=12, temperature=1.0, max_completion_length=2048, ds3_gather_for_generation=True, use_vllm=True, vllm_device=['auto'], vllm_gpu_memory_utilization=0.4, vllm_dtype='auto', vllm_max_model_len=4096, vllm_enable_prefix_caching=True, vllm_guided_decoding_regex=None, beta=0.04, num_iterations=1, epsilon=0.2, reward_weights=None, sync_ref_model=False, ref_model_mixup_alpha=0.6, ref_model_sync_steps=512, log_completions=True, check_model=True, acc_strategy='token', train_sampler_random=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', optimizer=None, local_repo_path=None, galore_config=None, top_k=50, top_p=0.85, repetition_penalty=1.0, num_infer_workers=1, vllm_max_num_seqs=256, vllm_enforce_eager=False, vllm_limit_mm_per_prompt={}, cosine_min_len_value_wrong=0.0, cosine_max_len_value_wrong=-0.5, cosine_min_len_value_correct=1.0, cosine_max_len_value_correct=0.5, cosine_max_len=2048, repetition_n_grams=3, repetition_max_penalty=-1.0, use_lmdeploy=False, lmdeploy_device='auto', lmdeploy_session_len=None, lmdeploy_cache_max_entry_count=0.8, async_generate=True, tensor_parallel_size=1, sleep_level=0, move_model_batches=None, offload_optimizer=False, offload_model=False, gc_collect_after_offload=False, multi_turn_func=None, mini_batch_size=None, stop_words=[])" | |
| } | 
