From ea11c21ff65587dce41400f51fa16c30779bab07 Mon Sep 17 00:00:00 2001 From: Gujiassh Date: Sun, 22 Mar 2026 17:36:04 +0900 Subject: [PATCH 1/2] Add Chat#step single-iteration execution Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode) Co-authored-by: Sisyphus --- lib/ruby_llm/chat.rb | 61 ++++++++++++++++++++------------ spec/ruby_llm/chat_tools_spec.rb | 54 ++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 23 deletions(-) diff --git a/lib/ruby_llm/chat.rb b/lib/ruby_llm/chat.rb index afc572342..72995a183 100644 --- a/lib/ruby_llm/chat.rb +++ b/lib/ruby_llm/chat.rb @@ -136,35 +136,25 @@ def each(&) messages.each(&) end - def complete(&) # rubocop:disable Metrics/PerceivedComplexity - response = @provider.complete( - messages, - tools: @tools, - tool_prefs: @tool_prefs, - temperature: @temperature, - model: @model, - params: @params, - headers: @headers, - schema: @schema, - thinking: @thinking, - &wrap_streaming_block(&) - ) + def complete(&) + response = step(&) + return response if response.is_a?(Tool::Halt) + + response.tool_call? ? complete(&) : response + end + + def step(&) + response = provider_complete(&) @on[:new_message]&.call unless block_given? - if @schema && response.content.is_a?(String) && !response.tool_call? - begin - response.content = JSON.parse(response.content) - rescue JSON::ParserError - # If parsing fails, keep content as string - end - end + normalize_schema_response(response) add_message response @on[:end_message]&.call(response) if response.tool_call? 
- handle_tool_calls(response, &) + handle_tool_calls(response, continue_loop: false, &) || response else response end @@ -186,6 +176,31 @@ def instance_variables private + def provider_complete(&) + @provider.complete( + messages, + tools: @tools, + tool_prefs: @tool_prefs, + temperature: @temperature, + model: @model, + params: @params, + headers: @headers, + schema: @schema, + thinking: @thinking, + &wrap_streaming_block(&) + ) + end + + def normalize_schema_response(response) + return unless @schema && response.content.is_a?(String) && !response.tool_call? + + begin + response.content = JSON.parse(response.content) + rescue JSON::ParserError + # If parsing fails, keep content as string + end + end + def normalize_schema_payload(raw_schema) return nil if raw_schema.nil? return raw_schema unless raw_schema.is_a?(Hash) @@ -231,7 +246,7 @@ def wrap_streaming_block(&block) end end - def handle_tool_calls(response, &) # rubocop:disable Metrics/PerceivedComplexity + def handle_tool_calls(response, continue_loop: true, &) # rubocop:disable Metrics/PerceivedComplexity halt_result = nil response.tool_calls.each_value do |tool_call| @@ -248,7 +263,7 @@ def handle_tool_calls(response, &) # rubocop:disable Metrics/PerceivedComplexity end reset_tool_choice if forced_tool_choice? - halt_result || complete(&) + halt_result || (continue_loop ? 
complete(&) : nil) end def execute_tool(tool_call) diff --git a/spec/ruby_llm/chat_tools_spec.rb b/spec/ruby_llm/chat_tools_spec.rb index c817c77b8..a3d0b41ad 100644 --- a/spec/ruby_llm/chat_tools_spec.rb +++ b/spec/ruby_llm/chat_tools_spec.rb @@ -603,6 +603,60 @@ def tool_result_message_for(chat, tool_call) expect(response.content).to eq('Task completed successfully') end + it 'step executes a single tool-calling iteration without recursing' do + chat = RubyLLM.chat.with_tool(Weather) + provider = chat.instance_variable_get(:@provider) + tool_call = RubyLLM::ToolCall.new( + id: 'call_1', + name: 'weather', + arguments: { 'latitude' => 52.52, 'longitude' => 13.405 } + ) + + allow(provider).to receive(:complete).and_return( + RubyLLM::Message.new( + role: :assistant, + content: '', + tool_calls: { tool_call.id => tool_call } + ) + ) + + chat.add_message(role: :user, content: "What's the weather in Berlin?") + + response = chat.step + + expect(response).to be_a(RubyLLM::Message) + expect(response.tool_call?).to be(true) + expect(provider).to have_received(:complete).once + expect(chat.messages.map(&:role)).to eq(%i[user assistant tool]) + expect(chat.messages.last.content).to include('15') + end + + it 'step returns Halt when a tool halts' do + chat = RubyLLM.chat.with_tool(HaltingTool) + provider = chat.instance_variable_get(:@provider) + tool_call = RubyLLM::ToolCall.new( + id: 'call_1', + name: 'halting', + arguments: {} + ) + + allow(provider).to receive(:complete).and_return( + RubyLLM::Message.new( + role: :assistant, + content: '', + tool_calls: { tool_call.id => tool_call } + ) + ) + + chat.add_message(role: :user, content: 'Execute the halting tool') + + response = chat.step + + expect(response).to be_a(RubyLLM::Tool::Halt) + expect(response.content).to eq('Task completed successfully') + expect(provider).to have_received(:complete).once + end + it 'does not continue conversation after halt' do call_count = 0 original_complete = 
described_class.instance_method(:complete) From 9a2aca03255e9808fa3df895accefcf51facb8a4 Mon Sep 17 00:00:00 2001 From: Gujiassh Date: Mon, 23 Mar 2026 10:20:29 +0900 Subject: [PATCH 2/2] Add ActiveRecord Chat#step parity Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode) Co-authored-by: Sisyphus --- lib/ruby_llm/active_record/chat_methods.rb | 8 +++ spec/ruby_llm/active_record/acts_as_spec.rb | 66 +++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/lib/ruby_llm/active_record/chat_methods.rb b/lib/ruby_llm/active_record/chat_methods.rb index 75a14afd4..66c962f2c 100644 --- a/lib/ruby_llm/active_record/chat_methods.rb +++ b/lib/ruby_llm/active_record/chat_methods.rb @@ -227,6 +227,14 @@ def complete(...) raise e end + def step(...) + to_llm.step(...) + rescue RubyLLM::Error => e + cleanup_failed_messages if @message&.persisted? && @message.content.blank? + cleanup_orphaned_tool_results + raise e + end + private def cleanup_failed_messages diff --git a/spec/ruby_llm/active_record/acts_as_spec.rb b/spec/ruby_llm/active_record/acts_as_spec.rb index 1bd648ec9..56835f26b 100644 --- a/spec/ruby_llm/active_record/acts_as_spec.rb +++ b/spec/ruby_llm/active_record/acts_as_spec.rb @@ -738,6 +738,72 @@ def uploaded_file(path, type) end end + describe 'step' do + it 'executes a single tool-calling iteration without recursing' do + chat = Chat.create!(model: model).with_tool(Calculator) + provider = chat.to_llm.instance_variable_get(:@provider) + tool_call = RubyLLM::ToolCall.new( + id: 'call_1', + name: 'calculator', + arguments: { 'expression' => '2 + 2' } + ) + + allow(provider).to receive(:complete).and_return( + RubyLLM::Message.new( + role: :assistant, + content: '', + tool_calls: { tool_call.id => tool_call } + ) + ) + + chat.add_message(role: :user, content: 'What is 2 + 2?') + + response = chat.step + + expect(response).to be_a(RubyLLM::Message) + expect(response.tool_call?).to be(true) + expect(provider).to
have_received(:complete).once + expect(chat.messages.order(:id).pluck(:role)).to eq(%w[user assistant tool]) + expect(chat.messages.order(:id).last.content).to eq('4') + end + + it 'returns Halt when a tool halts' do + stub_const('HaltingTool', Class.new(RubyLLM::Tool) do + description 'A tool that halts' + + def execute + halt('Task completed successfully') + end + end) + + chat = Chat.create!(model: model).with_tool(HaltingTool) + provider = chat.to_llm.instance_variable_get(:@provider) + tool_call = RubyLLM::ToolCall.new( + id: 'call_1', + name: 'halting', + arguments: {} + ) + + allow(provider).to receive(:complete).and_return( + RubyLLM::Message.new( + role: :assistant, + content: '', + tool_calls: { tool_call.id => tool_call } + ) + ) + + chat.add_message(role: :user, content: 'Execute the halting tool') + + response = chat.step + + expect(response).to be_a(RubyLLM::Tool::Halt) + expect(response.content).to eq('Task completed successfully') + expect(provider).to have_received(:complete).once + expect(chat.messages.order(:id).pluck(:role)).to eq(%w[user assistant tool]) + expect(chat.messages.order(:id).last.content).to eq('Task completed successfully') + end + end + describe 'error recovery' do it 'does not clean up complete tool interactions when error occurs after tool execution' do chat = Chat.create!(model: model)