{
  "bits": 8,
  "group_size": 128,
  "damp_percent": 0.01,
  "desc_act": true,
  "static_groups": true,
  "sym": true,
  "true_sequential": true,
  "model_name_or_path": "/mnt/llm_dataset/xverse_models/gptq/XVERSE-13B-Chat-GPTQ-Int8",
  "model_file_base_name": "gptq_model-8bit-128g",
  "is_marlin_format": false,
  "quant_method": "gptq"
}
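
A `quantize_config.json` like the one above is normally read automatically when the quantized checkpoint directory is loaded. Below is a minimal sketch, assuming the AutoGPTQ library is used for loading (the original document may use a different serving stack); the model directory is the `model_name_or_path` from the config, and all other choices (device, prompt) are illustrative.

```python
# Sketch: loading an 8-bit GPTQ checkpoint whose quantize_config.json
# matches the one shown above. Assumes auto-gptq and transformers are installed.
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer

# Path taken from "model_name_or_path" in the config above.
model_dir = "/mnt/llm_dataset/xverse_models/gptq/XVERSE-13B-Chat-GPTQ-Int8"

# AutoGPTQ picks up quantize_config.json from model_dir automatically,
# so bits=8, group_size=128, desc_act=true, sym=true, etc. need not be repeated here.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoGPTQForCausalLM.from_quantized(
    model_dir,
    device="cuda:0",
    trust_remote_code=True,  # XVERSE models ship custom modeling code
)

# Quick smoke test with an arbitrary prompt (illustrative only).
inputs = tokenizer("Hello, who are you?", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Note that `is_marlin_format` is false here, so the checkpoint is stored in the standard GPTQ layout rather than the Marlin kernel format, and `model_file_base_name` tells the loader which weight file inside the directory to use.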