From 5789d78c34c6dae9aaac60ea0e316b1d0d0ae8ad Mon Sep 17 00:00:00 2001 From: "anbei.yuan" Date: Sat, 9 May 2026 10:13:10 +0800 Subject: [PATCH] fix: strip <think> tokens from reasoning model output Models with thinking/reasoning capabilities (DeepSeek-R1, MiniMax-M2.7, QwQ, etc.) include <think>...</think> blocks in their response content. These internal reasoning tokens leak into agent output and downstream node inputs, corrupting the workflow. Add _strip_thinking_tokens() classmethod to OpenAIProvider that filters <think>...</think> blocks via regex. Applied in both: - _deserialize_chat_response() (Message content) - _append_chat_response_output() (timeline content) The fix is zero-cost for models without thinking tokens (fast path checks for '</think>' substring before regex). Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../node/agent/providers/openai_provider.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/runtime/node/agent/providers/openai_provider.py b/runtime/node/agent/providers/openai_provider.py index 3b64efcc4f..809f6a00a7 100755 --- a/runtime/node/agent/providers/openai_provider.py +++ b/runtime/node/agent/providers/openai_provider.py @@ -2,6 +2,7 @@ import base64 import hashlib +import re import binascii import os @@ -383,18 +384,31 @@ def _deserialize_chat_response(self, response: Any) -> Message: type="function" )) + content = self._get_attr(msg, "content") or "" + content = self._strip_thinking_tokens(content) + return Message( role=MessageRole.ASSISTANT, - content=self._get_attr(msg, "content") or "", + content=content, tool_calls=tool_calls ) + _THINK_PATTERN = re.compile(r"<think>.*?</think>\s*", re.DOTALL) + + @classmethod + def _strip_thinking_tokens(cls, text: str) -> str: + """Strip <think>...</think> blocks from model output (e.g. 
DeepSeek-R1, MiniMax-M2.7).""" + if "</think>" not in text: + return text + return cls._THINK_PATTERN.sub("", text).strip() + def _append_chat_response_output(self, timeline: List[Any], response: Any) -> None: """Add chat response to timeline, preserving tool_calls (Chat API compatible).""" msg = response.choices[0].message + content = self._strip_thinking_tokens(msg.content or "") assistant_msg = { "role": "assistant", "content": content } if getattr(msg, "tool_calls", None):