Created
March 7, 2026 11:57
-
-
Save sudoingX/c2facf7d8f7608c65c1024ef3b22d431 to your computer and use it in GitHub Desktop.
Patched Jinja template for Qwen 3.5 27B - fixes developer role crash + preserves thinking mode (thinking = 1). Drop-in replacement for agent tools (OpenCode, Claude Code, Continue, Cursor, Aider). Without this patch, --chat-template chatml silently kills thinking mode.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{#- Patched Qwen 3.5 ChatML chat template: vision content, XML-style
    <function=...> tool calls, developer-role support, and <think> handling
    (thinking mode on by default; enable_thinking=false emits a closed,
    empty think block). All tags use whitespace control, so comments and
    line breaks here never leak into the rendered prompt. -#}
{%- set image_count = namespace(value=0) %}
{%- set video_count = namespace(value=0) %}
{#- render_content: flatten a message's content (a plain string, or a list of
    text/image/video items) into ChatML text.
    do_vision_count: advance the global picture/video counters (only the final
    render pass counts, so the reverse scan below doesn't double-count).
    is_system_content: media items are rejected in system messages. -#}
{%- macro render_content(content, do_vision_count, is_system_content=false) %}
{%- if content is string %}
{{- content }}
{%- elif content is iterable and content is not mapping %}
{%- for item in content %}
{%- if 'image' in item or 'image_url' in item or item.type == 'image' %}
{%- if is_system_content %}
{{- raise_exception('System message cannot contain images.') }}
{%- endif %}
{%- if do_vision_count %}
{%- set image_count.value = image_count.value + 1 %}
{%- endif %}
{%- if add_vision_id %}
{{- 'Picture ' ~ image_count.value ~ ': ' }}
{%- endif %}
{{- '<|vision_start|><|image_pad|><|vision_end|>' }}
{%- elif 'video' in item or item.type == 'video' %}
{%- if is_system_content %}
{{- raise_exception('System message cannot contain videos.') }}
{%- endif %}
{%- if do_vision_count %}
{%- set video_count.value = video_count.value + 1 %}
{%- endif %}
{%- if add_vision_id %}
{{- 'Video ' ~ video_count.value ~ ': ' }}
{%- endif %}
{{- '<|vision_start|><|video_pad|><|vision_end|>' }}
{%- elif 'text' in item %}
{{- item.text }}
{%- else %}
{{- raise_exception('Unexpected item type in content.') }}
{%- endif %}
{%- endfor %}
{%- elif content is none or content is undefined %}
{{- '' }}
{%- else %}
{{- raise_exception('Unexpected content type.') }}
{%- endif %}
{%- endmacro %}
{%- if not messages %}
{{- raise_exception('No messages provided.') }}
{%- endif %}
{#- System turn. With tools: emit the tool manifest plus any leading
    system/developer text in one system block. Without tools: emit the
    leading system/developer text alone. -#}
{%- if tools and tools is iterable and tools is not mapping %}
{{- '<|im_start|>system\n' }}
{{- "# Tools\n\nYou have access to the following functions:\n\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>" }}
{{- '\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n- Function calls MUST follow the specified format: an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n</IMPORTANT>' }}
{#- FIX: also accept a leading 'developer' message here. Previously only
    'system' was checked, so whenever tools were present a first developer
    message was silently dropped (the main loop below skips loop.first
    system/developer messages, and the no-tools branch already accepts
    both roles). -#}
{%- if messages[0].role == 'system' or messages[0].role == 'developer' %}
{%- set content = render_content(messages[0].content, false, true)|trim %}
{%- if content %}
{{- '\n\n' + content }}
{%- endif %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- else %}
{%- if messages[0].role == 'system' or messages[0].role == 'developer' %}
{%- set content = render_content(messages[0].content, false, true)|trim %}
{{- '<|im_start|>system\n' + content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{#- Reverse scan: find the last "real" user query, skipping trailing
    <tool_response> turns. Assistant messages after that index keep their
    <think> reasoning; earlier ones are stripped to the final answer. -#}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" %}
{%- set content = render_content(message.content, false)|trim %}
{%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if ns.multi_step_tool %}
{{- raise_exception('No user query found in messages.') }}
{%- endif %}
{#- Main render pass. -#}
{%- for message in messages %}
{%- set content = render_content(message.content, true)|trim %}
{%- if message.role == "system" or message.role == "developer" %}
{#- The leading system/developer message was emitted above; any later
    system/developer messages are downgraded to user turns. -#}
{%- if not loop.first %}
{{- '<|im_start|>user\n' + content + '<|im_end|>\n' }}
{%- endif %}
{%- elif message.role == "user" %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{#- Recover reasoning either from an explicit reasoning_content field or
    from inline <think>...</think> markers embedded in the content. -#}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- set reasoning_content = reasoning_content|trim %}
{%- if loop.index0 > ns.last_query_index %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content + '\n</think>\n\n' + content }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{#- Serialize tool calls as <tool_call><function=...> XML blocks, matching
    the format advertised in the system prompt above. -#}
{%- if message.tool_calls and message.tool_calls is iterable and message.tool_calls is not mapping %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{%- if loop.first %}
{%- if content|trim %}
{{- '\n\n<tool_call>\n<function=' + tool_call.name + '>\n' }}
{%- else %}
{{- '<tool_call>\n<function=' + tool_call.name + '>\n' }}
{%- endif %}
{%- else %}
{{- '\n<tool_call>\n<function=' + tool_call.name + '>\n' }}
{%- endif %}
{%- if tool_call.arguments is mapping %}
{%- for args_name in tool_call.arguments %}
{%- set args_value = tool_call.arguments[args_name] %}
{{- '<parameter=' + args_name + '>\n' }}
{#- Non-scalar argument values are JSON-encoded; scalars are stringified. -#}
{%- set args_value = args_value | tojson | safe if args_value is mapping or (args_value is sequence and args_value is not string) else args_value | string %}
{{- args_value }}
{{- '\n</parameter>\n' }}
{%- endfor %}
{%- endif %}
{{- '</function>\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{#- Consecutive tool results are merged into one user turn containing a
    sequence of <tool_response> blocks. -#}
{%- if loop.previtem and loop.previtem.role != "tool" %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if not loop.last and loop.nextitem.role != "tool" %}
{{- '<|im_end|>\n' }}
{%- elif loop.last %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- else %}
{#- Unknown roles fall back to user turns rather than erroring out. -#}
{{- '<|im_start|>user\n' + content + '<|im_end|>\n' }}
{%- endif %}
{%- endfor %}
{#- Generation prompt: open an assistant turn. Thinking mode stays on unless
    enable_thinking is explicitly false, in which case a closed empty think
    block is emitted so decoding starts on the final answer. -#}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- else %}
{{- '<think>\n' }}
{%- endif %}
{%- endif %}
FYI, there's a LocalLLaMA thread about empty think blocks silently losing some of the KV cache reuse.
r/LocalLLaMA/comments/1sg076h/i_tracked_a_major_cache_reuse_issue_down_to_qwen
https://gist.github.com/sudoingX/c2facf7d8f7608c65c1024ef3b22d431#file-qwen3-5_chat_template-jinja-L100
One-line change in this revision. Initial runs look pretty good on Hermes + Qwen3.5-35B-A3B-GPTQ-Int4 on 2x4060.
https://gist.github.com/chuyqa/e44bda6b471792758849971c7cca519c/revisions
Hermes is stopping less and my cache hit percentages are looking higher.
(APIServer pid=1) INFO 04-10 01:40:19 [loggers.py:259] Engine 000: Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 85.8 tokens/s, Running: 1 reqs, Waiting: 0 reqs, GPU KV cache usage: 3.8%, Prefix cache hit rate: 96.7%, MM cache hit rate: 33.3%
(APIServer pid=1) INFO 04-10 01:40:29 [loggers.py:259] Engine 000: Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 85.5 tokens/s, Running: 1 reqs, Waiting: 0 reqs, GPU KV cache usage: 3.8%, Prefix cache hit rate: 96.7%, MM cache hit rate: 33.3%
(APIServer pid=1) INFO: 192.168.101.54:51875 - "POST /v1/chat/completions HTTP/1.1" 200 OK
(APIServer pid=1) INFO 04-10 01:40:37 [qwen3xml_tool_parser.py:1178] vLLM Successfully import tool parser Qwen3XMLToolParser !
(APIServer pid=1) INFO 04-10 01:40:39 [loggers.py:259] Engine 000: Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 66.5 tokens/s, Running: 1 reqs, Waiting: 0 reqs, GPU KV cache usage: 5.2%, Prefix cache hit rate: 96.6%, MM cache hit rate: 33.3%
(APIServer pid=1) INFO 04-10 01:40:53 [qwen3xml_tool_parser.py:1178] vLLM Successfully import tool parser Qwen3XMLToolParser !
(APIServer pid=1) INFO 04-10 01:40:59 [loggers.py:259] Engine 000: Avg prompt throughput: 5003.2 tokens/s, Avg generation throughput: 63.1 tokens/s, Running: 1 reqs, Waiting: 0 reqs, GPU KV cache usage: 12.7%, Prefix cache hit rate: 96.6%, MM cache hit rate: 33.3%
Thanks a lot for this, I'm using it on 9B Q4_K_M with an RTX 5080 and the results are pretty good too!
Samesies! Results are pretty good so far, this was an annoying bug for opencode & claude-code
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Couldn't get it to work, so I switched from LM Studio to llama-server. But why aren't you using the flags "--temp 0.6 --top-p 0.95 --top-k 20 ..." with Qwen3.5 in llama-server, as recommended by Unsloth? I thought they were mandatory for good reasoning. I always used them in LM Studio, but I'm new to llama-server. Did I miss something?