default_stage:
  default_modifiers:
    SmoothQuantModifier:
      smoothing_strength: 0.8
      mappings:
      - !!python/tuple
        - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
        - re:.*input_layernorm
      - !!python/tuple
        - ['re:.*gate_proj', 're:.*up_proj']
        - re:.*post_attention_layernorm
      ignore: []
    GPTQModifier:
      targets: [Linear]
      ignore: [lm_head]
      scheme: W8A8
      sequential_update: true
      block_size: 128
      dampening_frac: 0.01
      offload_hessians: false
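
A recipe in this format is typically applied with llm-compressor's one-shot compression entry point. The sketch below is a hypothetical usage example, not part of this repository: the model id, calibration dataset, sample count, and output directory are placeholder assumptions, and the exact import path can vary between llm-compressor versions.

# Minimal sketch, assuming llm-compressor is installed and this file is saved as recipe.yaml
from llmcompressor.transformers import oneshot  # import path assumed; newer releases also expose llmcompressor.oneshot

oneshot(
    model="meta-llama/Meta-Llama-3-8B-Instruct",  # placeholder base model id
    dataset="open_platypus",                      # placeholder calibration dataset
    recipe="recipe.yaml",                         # the SmoothQuant + GPTQ W8A8 recipe above
    output_dir="./model-w8a8",                    # placeholder output path
    max_seq_length=2048,
    num_calibration_samples=512,
)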