Skip to content

Commit a4a5086

Browse files
eb8680 and kiranandcode authored
Clean up internal LLM API (#484)
* Fresh diff
* remove instructionhandler
* updated internal interface to make all tests pass
* fixed caching tests
* updated llm.ipynb
* removed unnecessarily defensive validation
* updated tool call decoding to use concrete type of tool result instead of annotations
* updated completions to fix basic type errors
* updated call assistant to handle decoding tool calls
* dropped stale comments
* moved model and param model back to internals of `completions`

---------

Co-authored-by: Kiran Gopinathan <kiran@basis.ai>
1 parent 02facf3 commit a4a5086

29 files changed

Lines changed: 684 additions & 904 deletions

File tree

docs/source/agent.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,9 @@
33
from effectful.handlers.llm import Template
44
from effectful.handlers.llm.completions import (
55
LiteLLMProvider,
6-
compute_response,
7-
format_model_input,
6+
Message,
7+
call_assistant,
8+
call_user,
89
)
910
from effectful.ops.semantics import fwd, handler
1011
from effectful.ops.syntax import defop
@@ -31,28 +32,27 @@ def wrapper(self, *args, **kwargs):
3132
with handler(
3233
{
3334
Agent.current_agent: lambda: self,
34-
format_model_input: self._format_model_input,
35-
compute_response: self._compute_response,
35+
call_user: self._format_model_input,
36+
call_assistant: self._compute_response,
3637
}
3738
):
3839
return template(self, *args, **kwargs)
3940

4041
setattr(cls, method_name, wrapper)
4142

42-
def _format_model_input(self, template, other, *args, **kwargs):
43+
def _format_model_input(self, template, env):
4344
# update prompt with previous list of messages
4445
prompt = fwd()
4546
if Agent.current_agent() is self:
46-
assert self is other
4747
self.state.extend(prompt)
4848
prompt = self.state
4949
return prompt
5050

5151
def _compute_response(self, *args, **kwargs):
5252
# save response into persisted state
53-
response = fwd()
53+
response: Message = fwd()
5454
if Agent.current_agent() is self:
55-
self.state.append(response.choices[0].message.model_dump())
55+
self.state.append(response)
5656
return response
5757

5858

docs/source/llm.ipynb

Lines changed: 45 additions & 108 deletions
Large diffs are not rendered by default.

0 commit comments

Comments (0)