-
Notifications
You must be signed in to change notification settings - Fork 4k
Expand file tree
/
Copy pathrun_config.py
More file actions
193 lines (143 loc) · 6.85 KB
/
run_config.py
File metadata and controls
193 lines (143 loc) · 6.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
from __future__ import annotations
import os
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable, Generic
from typing_extensions import NotRequired, TypedDict
from .guardrail import InputGuardrail, OutputGuardrail
from .handoffs import HandoffHistoryMapper, HandoffInputFilter
from .items import TResponseInputItem
from .lifecycle import RunHooks
from .memory import Session, SessionInputCallback, SessionSettings
from .model_settings import ModelSettings
from .models.interface import Model, ModelProvider
from .models.multi_provider import MultiProvider
from .run_context import TContext
from .run_error_handlers import RunErrorHandlers
from .tracing import TracingConfig
from .util._types import MaybeAwaitable
if TYPE_CHECKING:
from .agent import Agent
DEFAULT_MAX_TURNS = 10
def _default_trace_include_sensitive_data() -> bool:
"""Return the default for trace_include_sensitive_data based on environment."""
val = os.getenv("OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA", "false")
return val.strip().lower() in ("1", "true", "yes", "on")
@dataclass
class ModelInputData:
    """Container for the data that will be sent to the model."""

    # The list of input items (conversation history / new input) for the model call.
    input: list[TResponseInputItem]
    # The system instructions for the model call; None means no instructions.
    instructions: str | None
@dataclass
class CallModelData(Generic[TContext]):
    """Data passed to `RunConfig.call_model_input_filter` prior to model call."""

    # The instructions and input items that are about to be sent to the model.
    model_data: ModelInputData
    # The agent whose model call is about to be made.
    agent: Agent[TContext]
    # The user-provided run context, if any was supplied.
    context: TContext | None
# Signature for `RunConfig.call_model_input_filter`: receives the CallModelData
# for the upcoming model call and returns the (possibly modified) ModelInputData
# to use. May be a plain function or a coroutine (MaybeAwaitable).
CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]
@dataclass
class RunConfig:
    """Configures settings for the entire agent run.

    Every field is optional; non-default values act as run-wide overrides on top
    of per-agent configuration.
    """

    model: str | Model | None = None
    """The model to use for the entire agent run. If set, will override the model set on every
    agent. The model_provider passed in below must be able to resolve this model name.
    """
    model_provider: ModelProvider = field(default_factory=MultiProvider)
    """The model provider to use when looking up string model names. Defaults to a
    `MultiProvider` instance."""
    model_settings: ModelSettings | None = None
    """Configure global model settings. Any non-null values will override the agent-specific model
    settings.
    """
    session_settings: SessionSettings | None = None
    """Configure session settings. Any non-null values will override the session's default
    settings. Used to control session behavior like the number of items to retrieve.
    """
    handoff_input_filter: HandoffInputFilter | None = None
    """A global input filter to apply to all handoffs. If `Handoff.input_filter` is set, then that
    will take precedence. The input filter allows you to edit the inputs that are sent to the new
    agent. See the documentation in `Handoff.input_filter` for more details.
    """
    nest_handoff_history: bool = True
    """Wrap prior run history in a single assistant message before handing off when no custom
    input filter is set. Set to False to preserve the raw transcript behavior from previous
    releases.
    """
    handoff_history_mapper: HandoffHistoryMapper | None = None
    """Optional function that receives the normalized transcript (history + handoff items) and
    returns the input history that should be passed to the next agent. When left as `None`, the
    runner collapses the transcript into a single assistant message. This function only runs when
    `nest_handoff_history` is True.
    """
    input_guardrails: list[InputGuardrail[Any]] | None = None
    """A list of input guardrails to run on the initial run input."""
    output_guardrails: list[OutputGuardrail[Any]] | None = None
    """A list of output guardrails to run on the final output of the run."""
    tracing_disabled: bool = False
    """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
    """
    tracing: TracingConfig | None = None
    """Tracing configuration for this run."""
    # Default comes from the OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA env var
    # (off unless explicitly set to a truthy value).
    trace_include_sensitive_data: bool = field(
        default_factory=_default_trace_include_sensitive_data
    )
    """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
    LLM generations) in traces. If False, we'll still create spans for these events, but the
    sensitive data will not be included.
    """
    workflow_name: str = "Agent workflow"
    """The name of the run, used for tracing. Should be a logical name for the run, like
    "Code generation workflow" or "Customer support agent".
    """
    trace_id: str | None = None
    """A custom trace ID to use for tracing. If not provided, we will generate a new trace ID."""
    group_id: str | None = None
    """
    A grouping identifier to use for tracing, to link multiple traces from the same conversation
    or process. For example, you might use a chat thread ID.
    """
    trace_metadata: dict[str, Any] | None = None
    """
    An optional dictionary of additional metadata to include with the trace.
    """
    session_input_callback: SessionInputCallback | None = None
    """Defines how to handle session history when new input is provided.
    - `None` (default): The new input is appended to the session history.
    - `SessionInputCallback`: A custom function that receives the history and new input, and
      returns the desired combined list of items.
    """
    call_model_input_filter: CallModelInputFilter | None = None
    """
    Optional callback that is invoked immediately before calling the model. It receives the current
    agent, context and the model input (instructions and input items), and must return a possibly
    modified `ModelInputData` to use for the model call.
    This allows you to edit the input sent to the model e.g. to stay within a token limit.
    For example, you can use this to add a system prompt to the input.
    """
class RunOptions(TypedDict, Generic[TContext]):
    """Arguments for ``AgentRunner`` methods.

    Every key is ``NotRequired``: callers may pass any subset, and absent keys
    fall back to the runner's defaults.
    """

    context: NotRequired[TContext | None]
    """The context for the run."""
    max_turns: NotRequired[int]
    """The maximum number of turns to run for."""
    hooks: NotRequired[RunHooks[TContext] | None]
    """Lifecycle hooks for the run."""
    run_config: NotRequired[RunConfig | None]
    """Run configuration."""
    previous_response_id: NotRequired[str | None]
    """The ID of the previous response, if any."""
    auto_previous_response_id: NotRequired[bool]
    """Enable automatic response chaining for the first turn."""
    conversation_id: NotRequired[str | None]
    """The ID of the stored conversation, if any."""
    session: NotRequired[Session | None]
    """The session for the run."""
    error_handlers: NotRequired[RunErrorHandlers[TContext] | None]
    """Error handlers keyed by error kind. Currently supports max_turns."""
# Public API of this module.
# NOTE(review): `_default_trace_include_sensitive_data` has a leading underscore
# yet is exported here — presumably intentional (so the env-driven default is
# reachable via `import *`); confirm before removing.
__all__ = [
    "DEFAULT_MAX_TURNS",
    "CallModelData",
    "CallModelInputFilter",
    "ModelInputData",
    "RunConfig",
    "RunOptions",
    "_default_trace_include_sensitive_data",
]