BasedBase-Qwen3-Coder-30B-A3B-Instruct-480B-Distill-V2-MLX-8bit-GS32/Qwen3 Coder a3b 480b DISTILL LM STUDIO TOOL USE.preset.json
{
  "identifier": "@local:qwen3-coder-a3b-480b-distill-lm-studio-tool-use",
  "name": "Qwen3 Coder a3b - 480b DISTILL - LM STUDIO (TOOL USE)",
  "changed": false,
  "operation": {
    "fields": [
      {
        "key": "llm.prediction.systemPrompt",
        "value": "TOOL USE RULES\n- If you decide to call a tool, output the tool call ONLY. Do not output any other text in the same message.\n- Do NOT print control tokens like <start_of_turn>user or <start_of_turn>model in your output.\n- After a successful tool call, WAIT for the tool result. Do not immediately call the tool again unless the previous call failed or returned nextThoughtNeeded=true and you have NEW parameters.\n- Never call the same tool twice in a row with identical parameters.\n- After summarizing a tool result once, STOP."
      },
      {
        "key": "llm.prediction.promptTemplate",
        "value": {
          "type": "jinja",
          "jinjaPromptTemplate": {
            "template": "{{ bos_token }}\n{%- if messages and messages[0]['role'] == 'system' -%}\n    {%- set first_user_prefix = messages[0]['content'] ~ '\\n\\n' -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = '' -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}\n    {{ '<start_of_turn>' ~ role ~ '\\n' ~ (first_user_prefix if loop.first else '') }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- elif item['type'] == 'tool_call' -%}\n                ```tool_code\n                {{ item['code'] | trim }}\n                ```\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception('Invalid content type') }}\n    {%- endif -%}\n    {{ '<end_of_turn>\\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt and (loop_messages | length == 0 or loop_messages[-1]['role'] == 'user') -%}\n    {{ '<start_of_turn>model\\n' }}\n{%- endif -%}"
          },
          "stopStrings": [
            "<end_of_turn>",
            "<start_of_turn>user",
            "<start_of_turn>model",
            "<start_of_turn>tool"
          ],
          "manualPromptTemplate": {
            "beforeSystem": "<|im_start|>system\n",
            "afterSystem": "<|im_end|>\n",
            "beforeUser": "<|im_start|>user\n",
            "afterUser": "<|im_end|>\n",
            "beforeAssistant": "<|im_start|>assistant\n",
            "afterAssistant": "<|im_end|>\n"
          }
        }
      },
      {
        "key": "llm.prediction.topPSampling",
        "value": {
          "checked": true,
          "value": 0.8
        }
      },
      {
        "key": "llm.prediction.topKSampling",
        "value": 20
      },
      {
        "key": "llm.prediction.temperature",
        "value": 0.7
      },
      {
        "key": "llm.prediction.repeatPenalty",
        "value": {
          "checked": true,
          "value": 1.05
        }
      }
    ]
  },
  "load": {
    "fields": []
  }
}