MiniMax-M2-GPTQMODEL-W4A16 / quantize_config.json
{
  "bits": 4,
  "group_size": 32,
  "desc_act": false,
  "sym": true,
  "lm_head": false,
  "quant_method": "gptq",
  "checkpoint_format": "gptq",
  "pack_dtype": "int32",
  "meta": {
    "quantizer": [
      "gptqmodel:5.1.0-dev"
    ],
    "uri": "https://github.com/modelcloud/gptqmodel",
    "damp_percent": 0.05,
    "damp_auto_increment": 0.01,
    "static_groups": false,
    "true_sequential": true,
    "mse": 0.0,
    "v2": false,
    "v2_alpha": 0.25,
    "act_group_aware": true
  },
  "pack_impl": "cpu"
}
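
A minimal sketch of how a config like this is typically produced with GPTQModel (https://github.com/modelcloud/gptqmodel). This is not the exact command used for this checkpoint; the model id, calibration samples, and output path below are placeholders, and only the core fields (bits, group_size, desc_act, sym) are set explicitly, leaving the rest at library defaults.

    # Sketch only: assumes gptqmodel is installed; identifiers marked as placeholders
    # are not taken from this repository.
    from gptqmodel import GPTQModel, QuantizeConfig

    quant_config = QuantizeConfig(
        bits=4,          # 4-bit weights (the "W4" in W4A16)
        group_size=32,   # per-32-column quantization groups
        desc_act=False,  # no activation-order column reordering
        sym=True,        # symmetric quantization
    )

    # A real run would use a few hundred representative text samples; a short list
    # of strings is used here only to keep the sketch self-contained.
    calibration_dataset = [
        "GPTQ calibration needs representative text samples.",
        "These two sentences stand in for a real calibration set.",
    ]

    model = GPTQModel.load("MiniMaxAI/MiniMax-M2", quant_config)  # placeholder model id
    model.quantize(calibration_dataset)
    model.save("MiniMax-M2-GPTQMODEL-W4A16")  # writes quantize_config.json alongside the packed weights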