```yaml
base_model: /root/LLM/NQLSG-Qwen2.5-14B-Base2
chat_template: auto
dtype: bfloat16
merge_method: sce
parameters:
  int8_mask: 1.0
slices:
- sources:
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v3
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v3-alpaca_gpt4_zh
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v4
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v4-reasoning
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v5
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v5-reasoning
  - layer_range: [0, 48]
    model: Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v5-roleplay
  - layer_range: [0, 48]
    model: /root/LLM/NQLSG-Qwen2.5-14B-Base1
  - layer_range: [0, 48]
    model: /root/LLM/NQLSG-Qwen2.5-14B-Base2
  - layer_range: [0, 48]
    model: /root/LLM/NQLSG-Qwen2.5-14B-Base3
tokenizer: {}
```
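This configuration merges the full layer stack (layers 0–47) of ten Qwen2.5-14B variants with the `sce` merge method and int8 masking, using `/root/LLM/NQLSG-Qwen2.5-14B-Base2` as the base model. Below is a minimal sketch of how such a config can be applied with mergekit's Python API (`MergeConfiguration`, `MergeOptions`, `run_merge`), following the usage shown in the mergekit README; the file name `merge.yaml`, the output path, and the option values are illustrative assumptions, and the local `/root/LLM/*` checkpoints must exist on disk. The equivalent CLI invocation is `mergekit-yaml merge.yaml ./output-dir`.

```python
# Sketch: run the merge described by the YAML config above using mergekit.
# Assumptions: config saved as "merge.yaml"; output directory is illustrative.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

CONFIG_YML = "merge.yaml"                    # the YAML config shown above
OUTPUT_PATH = "./NQLSG-Qwen2.5-14B-merged"   # where the merged weights are written

# Parse and validate the YAML into a mergekit MergeConfiguration.
with open(CONFIG_YML, "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # use GPU for the merge if one is present
        copy_tokenizer=True,             # copy the base model's tokenizer to the output
        lazy_unpickle=True,              # stream tensors to reduce peak RAM usage
    ),
)
```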