{
  "bits": 2,
  "group_size": 64,
  "desc_act": false,
  "static_groups": false,
  "sym": false,
  "lm_head": false,
  "damp_percent": 0.01,
  "true_sequential": true,
  "model_name_or_path": "/cpfs01/user/chenmengzhao/efficientqat_repo/efficientqat_checkpoints_GPTQ/Llama-3-8b-instruct-EfficientQAT-w2g64-GPTQ",
  "model_file_base_name": "model",
  "quant_method": "gptq",
  "checkpoint_format": "gptq_v2",
  "meta": {
    "quantizer": "gptqmodel:0.9.9-dev0"
  }
}
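
The block above looks like the `quantize_config.json` written by GPTQModel for a 2-bit, group-size-64 EfficientQAT checkpoint in `gptq_v2` format. Below is a minimal loading sketch, assuming the `gptqmodel` package is installed and the checkpoint directory is available locally; the local path, the prompt, the device choice, and the exact entry point (`GPTQModel.load`, with older releases exposing the same behaviour as `GPTQModel.from_quantized`) are assumptions for illustration, not part of the checkpoint itself.

```python
# Minimal loading sketch, assuming the `gptqmodel` package is installed and the
# quantized checkpoint (carrying the quantize_config above) is available locally.
# The path, prompt, and device are illustrative placeholders.
from gptqmodel import GPTQModel
from transformers import AutoTokenizer

model_path = "Llama-3-8b-instruct-EfficientQAT-w2g64-GPTQ"  # hypothetical local directory

tokenizer = AutoTokenizer.from_pretrained(model_path)
# bits / group_size / sym / checkpoint_format are picked up from quantize_config.json;
# older gptqmodel releases use GPTQModel.from_quantized() for the same purpose.
model = GPTQModel.load(model_path, device="cuda:0")

prompt = "Explain 2-bit, group-size-64 weight quantization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```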