-
Notifications
You must be signed in to change notification settings - Fork 815
Expand file tree
/
Copy pathbedrock.py
More file actions
1148 lines (968 loc) · 49.6 KB
/
bedrock.py
File metadata and controls
1148 lines (968 loc) · 49.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""AWS Bedrock model provider.
- Docs: https://aws.amazon.com/bedrock/
"""
import asyncio
import json
import logging
import os
import warnings
from collections.abc import AsyncGenerator, Callable, Iterable, ValuesView
from typing import Any, Literal, TypeVar, cast
import boto3
from botocore.config import Config as BotocoreConfig
from botocore.exceptions import ClientError
from pydantic import BaseModel
from typing_extensions import TypedDict, Unpack, override
from strands.types.media import S3Location, SourceLocation
from .._exception_notes import add_exception_note
from ..event_loop import streaming
from ..tools import convert_pydantic_to_tool_spec
from ..tools._tool_helpers import noop_tool
from ..types.content import ContentBlock, Messages, SystemContentBlock
from ..types.exceptions import (
ContextWindowOverflowException,
ModelThrottledException,
)
from ..types.streaming import CitationsDelta, StreamEvent
from ..types.tools import ToolChoice, ToolSpec
from ._validation import validate_config_keys
from .model import CacheConfig, Model
# Module-level logger; callers configure handlers/levels externally.
logger = logging.getLogger(__name__)

# See: `BedrockModel._get_default_model_with_warning` for why we need both
DEFAULT_BEDROCK_MODEL_ID = "us.anthropic.claude-sonnet-4-20250514-v1:0"
# Template variant: the "{}" placeholder is presumably filled with a region
# prefix (e.g. "us", "eu") by `_get_default_model_with_warning` — confirm there.
_DEFAULT_BEDROCK_MODEL_ID = "{}.anthropic.claude-sonnet-4-20250514-v1:0"
# Fallback region when none is resolved from args, boto session, or AWS_REGION.
DEFAULT_BEDROCK_REGION = "us-west-2"

# Substrings of Bedrock error messages that indicate the request exceeded the
# model's context window (used to raise ContextWindowOverflowException).
BEDROCK_CONTEXT_WINDOW_OVERFLOW_MESSAGES = [
    "Input is too long for requested model",
    "input length and `max_tokens` exceed context limit",
    "too many total text bytes",
    "prompt is too long",
]

# Models that should include tool result status (include_tool_result_status = True)
_MODELS_INCLUDE_STATUS = [
    "anthropic.claude",
]

# Type parameter for structured-output helpers bound to Pydantic models.
T = TypeVar("T", bound=BaseModel)

# Default botocore read timeout (seconds) applied when the caller supplies no
# boto_client_config of their own.
DEFAULT_READ_TIMEOUT = 120
class BedrockModel(Model):
"""AWS Bedrock model provider implementation.
The implementation handles Bedrock-specific features such as:
- Tool configuration for function calling
- Guardrails integration
- Caching points for system prompts and tools
- Streaming responses
- Context window overflow detection
"""
class BedrockConfig(TypedDict, total=False):
    """Configuration options for Bedrock models.

    All keys are optional (total=False); unset keys fall back to Bedrock or
    provider defaults.

    Attributes:
        additional_args: Any additional arguments to include in the request
        additional_request_fields: Additional fields to include in the Bedrock request
        additional_response_field_paths: Additional response field paths to extract
        cache_prompt: Cache point type for the system prompt (deprecated, use cache_config)
        cache_config: Configuration for prompt caching. Use CacheConfig(strategy="auto") for automatic caching.
        cache_tools: Cache point type for tools
        guardrail_id: ID of the guardrail to apply
        guardrail_trace: Guardrail trace mode. Defaults to enabled.
        guardrail_version: Version of the guardrail to apply
        guardrail_stream_processing_mode: The guardrail processing mode
        guardrail_redact_input: Flag to redact input if a guardrail is triggered. Defaults to True.
        guardrail_redact_input_message: If a Bedrock Input guardrail triggers, replace the input with this message.
        guardrail_redact_output: Flag to redact output if guardrail is triggered. Defaults to False.
        guardrail_redact_output_message: If a Bedrock Output guardrail triggers, replace output with this message.
        guardrail_latest_message: Flag to send only the latest user message to guardrails.
            Defaults to False.
        max_tokens: Maximum number of tokens to generate in the response
        model_id: The Bedrock model ID (e.g., "us.anthropic.claude-sonnet-4-20250514-v1:0")
        include_tool_result_status: Flag to include status field in tool results.
            True includes status, False removes status, "auto" determines based on model_id. Defaults to "auto".
        stop_sequences: List of sequences that will stop generation when encountered
        streaming: Flag to enable/disable streaming. Defaults to True.
        temperature: Controls randomness in generation (higher = more random)
        top_p: Controls diversity via nucleus sampling (alternative to temperature)
    """

    additional_args: dict[str, Any] | None
    additional_request_fields: dict[str, Any] | None
    additional_response_field_paths: list[str] | None
    cache_prompt: str | None
    cache_config: CacheConfig | None
    cache_tools: str | None
    guardrail_id: str | None
    guardrail_trace: Literal["enabled", "disabled", "enabled_full"] | None
    guardrail_stream_processing_mode: Literal["sync", "async"] | None
    guardrail_version: str | None
    guardrail_redact_input: bool | None
    guardrail_redact_input_message: str | None
    guardrail_redact_output: bool | None
    guardrail_redact_output_message: str | None
    guardrail_latest_message: bool | None
    max_tokens: int | None
    model_id: str
    include_tool_result_status: Literal["auto"] | bool | None
    stop_sequences: list[str] | None
    streaming: bool | None
    temperature: float | None
    top_p: float | None
def __init__(
    self,
    *,
    boto_session: boto3.Session | None = None,
    boto_client_config: BotocoreConfig | None = None,
    region_name: str | None = None,
    endpoint_url: str | None = None,
    **model_config: Unpack[BedrockConfig],
):
    """Initialize provider instance.

    Args:
        boto_session: Boto Session to use when calling the Bedrock Model.
        boto_client_config: Configuration to use when creating the Bedrock-Runtime Boto Client.
        region_name: AWS region to use for the Bedrock service.
            Defaults to the AWS_REGION environment variable if set, or "us-west-2" if not set.
        endpoint_url: Custom endpoint URL for VPC endpoints (PrivateLink)
        **model_config: Configuration options for the Bedrock model.

    Raises:
        ValueError: If both `region_name` and `boto_session` are specified.
    """
    # A session already carries a region; accepting both would be ambiguous.
    if region_name and boto_session:
        raise ValueError("Cannot specify both `region_name` and `boto_session`.")

    session = boto_session or boto3.Session()
    # Region resolution order: explicit arg > session region > env var > default.
    resolved_region = region_name or session.region_name or os.environ.get("AWS_REGION") or DEFAULT_BEDROCK_REGION

    # Seed config with defaults, then layer the caller's overrides on top.
    self.config = BedrockModel.BedrockConfig(
        model_id=BedrockModel._get_default_model_with_warning(resolved_region, model_config),
        include_tool_result_status="auto",
    )
    self.update_config(**model_config)
    logger.debug("config=<%s> | initializing", self.config)

    # Add strands-agents to the request user agent
    if boto_client_config:
        existing_user_agent = getattr(boto_client_config, "user_agent_extra", None)

        # Append 'strands-agents' to existing user_agent_extra or set it if not present
        if existing_user_agent:
            new_user_agent = f"{existing_user_agent} strands-agents"
        else:
            new_user_agent = "strands-agents"

        client_config = boto_client_config.merge(BotocoreConfig(user_agent_extra=new_user_agent))
    else:
        # No caller config: also apply the provider's default read timeout.
        # NOTE(review): a caller-supplied config without read_timeout keeps
        # botocore's default timeout, not DEFAULT_READ_TIMEOUT — confirm intended.
        client_config = BotocoreConfig(user_agent_extra="strands-agents", read_timeout=DEFAULT_READ_TIMEOUT)

    self.client = session.client(
        service_name="bedrock-runtime",
        config=client_config,
        endpoint_url=endpoint_url,
        region_name=resolved_region,
    )

    logger.debug("region=<%s> | bedrock client created", self.client.meta.region_name)
@property
def _cache_strategy(self) -> str | None:
"""The cache strategy for this model based on its model ID.
Returns the appropriate cache strategy name, or None if automatic caching is not supported for this model.
"""
model_id = self.config.get("model_id", "").lower()
if "claude" in model_id or "anthropic" in model_id:
return "anthropic"
# Application / cross-region inference profile ARNs don't contain the
# foundation model name, so the substring check above misses them.
# When the user explicitly opted into caching via cache_config, we
# optimistically enable caching for inference-profile ARNs. Currently
# only Anthropic Claude models support prompt caching on Bedrock; for
# non-caching models, the cache point is silently ignored by the API.
if model_id.startswith("arn:") and "inference-profile" in model_id:
return "anthropic"
return None
@override
def update_config(self, **model_config: Unpack[BedrockConfig]) -> None:  # type: ignore
    """Validate and merge configuration overrides into the current config.

    Args:
        **model_config: Configuration overrides to apply on top of the
            existing Bedrock model configuration.
    """
    # Reject unknown keys before mutating the live config.
    validate_config_keys(model_config, self.BedrockConfig)
    self.config.update(model_config)
@override
def get_config(self) -> BedrockConfig:
    """Return the live Bedrock model configuration mapping.

    Returns:
        The current configuration for this model instance.
    """
    return self.config
def _format_request(
    self,
    messages: Messages,
    tool_specs: list[ToolSpec] | None = None,
    system_prompt_content: list[SystemContentBlock] | None = None,
    tool_choice: ToolChoice | None = None,
) -> dict[str, Any]:
    """Format a Bedrock converse stream request.

    Args:
        messages: List of message objects to be processed by the model.
        tool_specs: List of tool specifications to make available to the model.
        tool_choice: Selection strategy for tool invocation.
        system_prompt_content: System prompt content blocks to provide context to the model.

    Returns:
        A Bedrock converse stream request.
    """
    # If history contains toolUse/toolResult blocks but no tools are supplied,
    # register a no-op tool so Bedrock accepts the tool content in history.
    if not tool_specs:
        has_tool_content = any(
            any("toolUse" in block or "toolResult" in block for block in msg.get("content", [])) for msg in messages
        )
        if has_tool_content:
            tool_specs = [noop_tool.tool_spec]

    # Use system_prompt_content directly (copy for mutability)
    system_blocks: list[SystemContentBlock] = system_prompt_content.copy() if system_prompt_content else []

    # Add cache point if configured (backwards compatibility)
    if cache_prompt := self.config.get("cache_prompt"):
        warnings.warn(
            "cache_prompt is deprecated. Use SystemContentBlock with cachePoint instead.", UserWarning, stacklevel=3
        )
        system_blocks.append({"cachePoint": {"type": cache_prompt}})

    return {
        "modelId": self.config["model_id"],
        "messages": self._format_bedrock_messages(messages),
        "system": system_blocks,
        # Tool configuration: tool specs, an optional tools cache point, and a
        # toolChoice (defaulting to auto). Omitted entirely when no tools.
        **(
            {
                "toolConfig": {
                    "tools": [
                        *[
                            {
                                "toolSpec": {
                                    "name": tool_spec["name"],
                                    "description": tool_spec["description"],
                                    "inputSchema": tool_spec["inputSchema"],
                                }
                            }
                            for tool_spec in tool_specs
                        ],
                        *(
                            [{"cachePoint": {"type": self.config["cache_tools"]}}]
                            if self.config.get("cache_tools")
                            else []
                        ),
                    ],
                    **({"toolChoice": tool_choice if tool_choice else {"auto": {}}}),
                }
            }
            if tool_specs
            else {}
        ),
        # May strip "thinking" when tool_choice forces tool use; see helper.
        **(self._get_additional_request_fields(tool_choice)),
        **(
            {"additionalModelResponseFieldPaths": self.config["additional_response_field_paths"]}
            if self.config.get("additional_response_field_paths")
            else {}
        ),
        # Guardrails require both an ID and a version to be configured.
        **(
            {
                "guardrailConfig": {
                    "guardrailIdentifier": self.config["guardrail_id"],
                    "guardrailVersion": self.config["guardrail_version"],
                    "trace": self.config.get("guardrail_trace", "enabled"),
                    **(
                        {"streamProcessingMode": self.config.get("guardrail_stream_processing_mode")}
                        if self.config.get("guardrail_stream_processing_mode")
                        else {}
                    ),
                }
            }
            if self.config.get("guardrail_id") and self.config.get("guardrail_version")
            else {}
        ),
        # Only include inference parameters the user actually configured.
        "inferenceConfig": {
            key: value
            for key, value in [
                ("maxTokens", self.config.get("max_tokens")),
                ("temperature", self.config.get("temperature")),
                ("topP", self.config.get("top_p")),
                ("stopSequences", self.config.get("stop_sequences")),
            ]
            if value is not None
        },
        # additional_args are splatted last, so they can override earlier keys.
        **(
            self.config["additional_args"]
            if "additional_args" in self.config and self.config["additional_args"] is not None
            else {}
        ),
    }
def _get_additional_request_fields(self, tool_choice: ToolChoice | None) -> dict[str, Any]:
    """Build the additionalModelRequestFields portion of a request.

    Bedrock's API does not allow thinking mode when tool_choice forces tool
    use, so the "thinking" key is stripped in that case (e.g. when forcing a
    tool for structured_output retry).

    Args:
        tool_choice: The tool choice configuration.

    Returns:
        A dict containing additionalModelRequestFields if configured, or empty dict.
    """
    configured_fields = self.config.get("additional_request_fields")
    if not configured_fields:
        return {}

    # tool_choice of {"any": ...} or {"tool": ...} forces tool invocation.
    forcing_tool = tool_choice is not None and ("any" in tool_choice or "tool" in tool_choice)
    if not (forcing_tool and "thinking" in configured_fields):
        return {"additionalModelRequestFields": configured_fields}

    # Forced tool use: drop "thinking" (copy, never mutate the config).
    remaining_fields = {key: value for key, value in configured_fields.items() if key != "thinking"}
    return {"additionalModelRequestFields": remaining_fields} if remaining_fields else {}
def _inject_cache_point(self, messages: list[dict[str, Any]]) -> None:
    """Place a single cache point at the end of the last user message.

    Any pre-existing cachePoint blocks are stripped first, because auto mode
    owns cache-point placement. ``messages`` is modified in place.

    Args:
        messages: List of messages to inject cache point into (modified in place).
    """
    if not messages:
        return

    target_idx: int | None = None
    for msg_idx, msg in enumerate(messages):
        blocks = msg.get("content", [])
        # Walk backwards so deletions don't shift indices we have yet to visit.
        for block_idx in range(len(blocks) - 1, -1, -1):
            if "cachePoint" in blocks[block_idx]:
                del blocks[block_idx]
                logger.warning(
                    "msg_idx=<%s>, block_idx=<%s> | stripped existing cache point (auto mode manages cache points)",
                    msg_idx,
                    block_idx,
                )
        if msg.get("role") == "user":
            target_idx = msg_idx

    # Append a fresh cache point to the last user message, if it has content.
    if target_idx is not None and messages[target_idx].get("content"):
        messages[target_idx]["content"].append({"cachePoint": {"type": "default"}})
        logger.debug("msg_idx=<%s> | added cache point to last user message", target_idx)
def _find_last_user_text_message_index(self, messages: Messages) -> int | None:
    """Locate the last user message carrying text or image content.

    Used for guardrail_latest_message so that guardContent wrapping targets
    the correct message even when toolResult messages follow it.

    Args:
        messages: List of messages to search

    Returns:
        Index of the last user message with text/image content, or None if not found
    """
    for idx in range(len(messages) - 1, -1, -1):
        candidate = messages[idx]
        if candidate["role"] != "user":
            continue
        if any("text" in block or "image" in block for block in candidate.get("content", [])):
            return idx
    return None
def _format_bedrock_messages(self, messages: Messages) -> list[dict[str, Any]]:
    """Format messages for Bedrock API compatibility.

    This function ensures messages conform to Bedrock's expected format by:
    - Filtering out SDK_UNKNOWN_MEMBER content blocks
    - Eagerly filtering content blocks to only include Bedrock-supported fields
    - Ensuring all message content blocks are properly formatted for the Bedrock API
    - Optionally wrapping the last user message in guardrailConverseContent blocks
    - Injecting cache points when cache_config is set with strategy="auto"

    Args:
        messages: List of messages to format

    Returns:
        Messages formatted for Bedrock API compatibility

    Note:
        Unlike other APIs that ignore unknown fields, Bedrock only accepts a strict
        subset of fields for each content block type and throws validation exceptions
        when presented with unexpected fields. Therefore, we must eagerly filter all
        content blocks to remove any additional fields before sending to Bedrock.

        https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ContentBlock.html
    """
    cleaned_messages: list[dict[str, Any]] = []
    filtered_unknown_members = False
    dropped_deepseek_reasoning_content = False

    # Pre-compute the index of the last user message containing text or image content.
    # This ensures guardContent wrapping is maintained across tool execution cycles, where
    # the final message in the list is a toolResult (role=user) rather than text/image content.
    last_user_text_idx = None
    if self.config.get("guardrail_latest_message", False):
        last_user_text_idx = self._find_last_user_text_message_index(messages)

    for idx, message in enumerate(messages):
        cleaned_content: list[dict[str, Any]] = []

        for content_block in message["content"]:
            # Filter out SDK_UNKNOWN_MEMBER content blocks
            if "SDK_UNKNOWN_MEMBER" in content_block:
                filtered_unknown_members = True
                continue

            # DeepSeek models have issues with reasoningContent
            # TODO: Replace with systematic model configuration registry (https://github.com/strands-agents/sdk-python/issues/780)
            if "deepseek" in self.config["model_id"].lower() and "reasoningContent" in content_block:
                dropped_deepseek_reasoning_content = True
                continue

            # Format content blocks for Bedrock API compatibility
            formatted_content = self._format_request_message_content(content_block)
            if formatted_content is None:
                continue

            # Wrap text or image content in guardContent if this is the last user text/image message
            if idx == last_user_text_idx and ("text" in formatted_content or "image" in formatted_content):
                if "text" in formatted_content:
                    formatted_content = {"guardContent": {"text": {"text": formatted_content["text"]}}}
                elif "image" in formatted_content:
                    formatted_content = {"guardContent": {"image": formatted_content["image"]}}

            cleaned_content.append(formatted_content)

        # Create new message with cleaned content (skip if empty)
        if cleaned_content:
            cleaned_messages.append({"content": cleaned_content, "role": message["role"]})

    if filtered_unknown_members:
        logger.warning(
            "Filtered out SDK_UNKNOWN_MEMBER content blocks from messages, consider upgrading boto3 version"
        )
    if dropped_deepseek_reasoning_content:
        logger.debug(
            "Filtered DeepSeek reasoningContent content blocks from messages - https://api-docs.deepseek.com/guides/reasoning_model#multi-round-conversation"
        )

    # Inject cache point into cleaned_messages (not original messages) if cache_config is set
    cache_config = self.config.get("cache_config")
    if cache_config:
        strategy: str | None = cache_config.strategy
        if strategy == "auto":
            # Resolve "auto" to the model-specific strategy (None if unsupported).
            strategy = self._cache_strategy
            if not strategy:
                logger.warning(
                    "model_id=<%s> | cache_config is enabled but this model does not support automatic caching",
                    self.config.get("model_id"),
                )
        if strategy == "anthropic":
            self._inject_cache_point(cleaned_messages)

    return cleaned_messages
def _should_include_tool_result_status(self) -> bool:
    """Resolve the include_tool_result_status setting to a concrete bool.

    Returns:
        The configured boolean when set explicitly; under "auto" (the
        default), True only for model families listed in _MODELS_INCLUDE_STATUS.
    """
    setting = self.config.get("include_tool_result_status", "auto")
    if isinstance(setting, bool):
        # Explicit True/False wins outright.
        return setting
    # "auto": decide from the configured model ID.
    model_id = self.config["model_id"]
    return any(known_model in model_id for known_model in _MODELS_INCLUDE_STATUS)
def _handle_location(self, location: SourceLocation) -> dict[str, Any] | None:
    """Translate a location source block into Bedrock's s3Location format.

    Only S3 locations are supported; any other location type is skipped with
    a warning.

    Returns:
        {"s3Location": {...}} for an S3 location, otherwise None.
    """
    if location["type"] != "s3":
        logger.warning("Non s3 location sources are not supported by Bedrock | skipping content block")
        return None

    s3_location = cast(S3Location, location)
    s3_payload: dict[str, Any] = {"uri": s3_location["uri"]}
    # bucketOwner is optional; include it only when present.
    if "bucketOwner" in s3_location:
        s3_payload["bucketOwner"] = s3_location["bucketOwner"]
    return {"s3Location": s3_payload}
def _format_request_message_content(self, content: ContentBlock) -> dict[str, Any] | None:
    """Format a Bedrock content block.

    Bedrock strictly validates content blocks and throws exceptions for unknown fields.
    This function extracts only the fields that Bedrock supports for each content type.

    Args:
        content: Content block to format.

    Returns:
        Bedrock formatted content block, or None when the block must be
        dropped (e.g. an unsupported non-S3 location source).

    Raises:
        TypeError: If the content block type is not supported by Bedrock.
    """
    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_CachePointBlock.html
    if "cachePoint" in content:
        return {"cachePoint": {"type": content["cachePoint"]["type"]}}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_DocumentBlock.html
    if "document" in content:
        document = content["document"]
        result: dict[str, Any] = {}

        # Handle required fields (all optional due to total=False)
        if "name" in document:
            result["name"] = document["name"]
        if "format" in document:
            result["format"] = document["format"]

        # Handle source - supports bytes or location
        if "source" in document:
            source = document["source"]
            formatted_document_source: dict[str, Any] | None
            if "location" in source:
                formatted_document_source = self._handle_location(source["location"])
                if formatted_document_source is None:
                    # Unsupported location: drop the whole document block.
                    return None
            elif "bytes" in source:
                formatted_document_source = {"bytes": source["bytes"]}
            # NOTE(review): if "source" contains neither "location" nor "bytes",
            # formatted_document_source is unbound here (UnboundLocalError) —
            # confirm upstream guarantees one of the two keys.
            result["source"] = formatted_document_source

        # Handle optional fields
        if "citations" in document and document["citations"] is not None:
            result["citations"] = {"enabled": document["citations"]["enabled"]}
        if "context" in document:
            result["context"] = document["context"]

        return {"document": result}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_GuardrailConverseContentBlock.html
    if "guardContent" in content:
        guard = content["guardContent"]
        guard_text = guard["text"]
        result = {"text": {"text": guard_text["text"], "qualifiers": guard_text["qualifiers"]}}
        return {"guardContent": result}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ImageBlock.html
    if "image" in content:
        image = content["image"]
        source = image["source"]
        formatted_image_source: dict[str, Any] | None
        if "location" in source:
            formatted_image_source = self._handle_location(source["location"])
            if formatted_image_source is None:
                return None
        elif "bytes" in source:
            formatted_image_source = {"bytes": source["bytes"]}
        result = {"format": image["format"], "source": formatted_image_source}
        return {"image": result}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ReasoningContentBlock.html
    if "reasoningContent" in content:
        reasoning = content["reasoningContent"]
        result = {}

        if "reasoningText" in reasoning:
            reasoning_text = reasoning["reasoningText"]
            result["reasoningText"] = {}
            if "text" in reasoning_text:
                result["reasoningText"]["text"] = reasoning_text["text"]
            # Only include signature if truthy (avoid empty strings)
            if reasoning_text.get("signature"):
                result["reasoningText"]["signature"] = reasoning_text["signature"]

        if "redactedContent" in reasoning:
            result["redactedContent"] = reasoning["redactedContent"]

        return {"reasoningContent": result}

    # Pass through text and other simple content types
    if "text" in content:
        return {"text": content["text"]}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ToolResultBlock.html
    if "toolResult" in content:
        tool_result = content["toolResult"]
        formatted_content: list[dict[str, Any]] = []
        for tool_result_content in tool_result["content"]:
            if "json" in tool_result_content:
                # Handle json field since not in ContentBlock but valid in ToolResultContent
                formatted_content.append({"json": tool_result_content["json"]})
            else:
                # Recursively format nested blocks; unsupported ones are dropped.
                formatted_message_content = self._format_request_message_content(
                    cast(ContentBlock, tool_result_content)
                )
                if formatted_message_content is None:
                    continue
                formatted_content.append(formatted_message_content)

        result = {
            "content": formatted_content,
            "toolUseId": tool_result["toolUseId"],
        }
        # Some model families reject the status field; gate it on configuration.
        if "status" in tool_result and self._should_include_tool_result_status():
            result["status"] = tool_result["status"]
        return {"toolResult": result}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_ToolUseBlock.html
    if "toolUse" in content:
        tool_use = content["toolUse"]
        return {
            "toolUse": {
                "input": tool_use["input"],
                "name": tool_use["name"],
                "toolUseId": tool_use["toolUseId"],
            }
        }

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_VideoBlock.html
    if "video" in content:
        video = content["video"]
        source = video["source"]
        formatted_video_source: dict[str, Any] | None
        if "location" in source:
            formatted_video_source = self._handle_location(source["location"])
            if formatted_video_source is None:
                return None
        elif "bytes" in source:
            formatted_video_source = {"bytes": source["bytes"]}
        result = {"format": video["format"], "source": formatted_video_source}
        return {"video": result}

    # https://docs.aws.amazon.com/bedrock/latest/APIReference/API_runtime_CitationsContentBlock.html
    if "citationsContent" in content:
        citations = content["citationsContent"]
        result = {}

        if "citations" in citations:
            result["citations"] = []
            for citation in citations["citations"]:
                filtered_citation: dict[str, Any] = {}
                if "location" in citation:
                    filtered_citation["location"] = citation["location"]
                if "sourceContent" in citation:
                    filtered_source_content: list[dict[str, Any]] = []
                    for source_content in citation["sourceContent"]:
                        if "text" in source_content:
                            filtered_source_content.append({"text": source_content["text"]})
                    if filtered_source_content:
                        filtered_citation["sourceContent"] = filtered_source_content
                if "title" in citation:
                    filtered_citation["title"] = citation["title"]
                result["citations"].append(filtered_citation)

        if "content" in citations:
            filtered_content: list[dict[str, Any]] = []
            for generated_content in citations["content"]:
                if "text" in generated_content:
                    filtered_content.append({"text": generated_content["text"]})
            if filtered_content:
                result["content"] = filtered_content

        return {"citationsContent": result}

    raise TypeError(f"content_type=<{next(iter(content))}> | unsupported type")
def _has_blocked_guardrail(self, guardrail_data: dict[str, Any]) -> bool:
    """Report whether any guardrail assessment blocked content.

    Args:
        guardrail_data: Guardrail data from trace information.

    Returns:
        True if any blocked guardrail is detected, False otherwise.
    """
    # Inspect input assessments first, then output assessments; any() stops
    # at the first detected-and-blocked policy.
    assessments = list(guardrail_data.get("inputAssessment", {}).values())
    assessments.extend(guardrail_data.get("outputAssessments", {}).values())
    return any(self._find_detected_and_blocked_policy(assessment) for assessment in assessments)
def _generate_redaction_events(self) -> list[StreamEvent]:
    """Build redactContent events per the guardrail redaction configuration.

    Input redaction defaults to enabled; output redaction defaults to
    disabled. Message text falls back to fixed defaults when unconfigured.

    Returns:
        List of redaction events to yield.
    """
    events: list[StreamEvent] = []

    if self.config.get("guardrail_redact_input", True):
        logger.debug("Redacting user input due to guardrail.")
        input_message = self.config.get("guardrail_redact_input_message", "[User input redacted.]")
        events.append({"redactContent": {"redactUserContentMessage": input_message}})

    if self.config.get("guardrail_redact_output", False):
        logger.debug("Redacting assistant output due to guardrail.")
        output_message = self.config.get(
            "guardrail_redact_output_message",
            "[Assistant output redacted.]",
        )
        events.append({"redactContent": {"redactAssistantContentMessage": output_message}})

    return events
@override
async def stream(
    self,
    messages: Messages,
    tool_specs: list[ToolSpec] | None = None,
    system_prompt: str | None = None,
    *,
    tool_choice: ToolChoice | None = None,
    system_prompt_content: list[SystemContentBlock] | None = None,
    **kwargs: Any,
) -> AsyncGenerator[StreamEvent, None]:
    """Stream conversation with the Bedrock model.

    This method calls either the Bedrock converse_stream API or the converse API
    based on the streaming parameter in the configuration.

    Args:
        messages: List of message objects to be processed by the model.
        tool_specs: List of tool specifications to make available to the model.
        system_prompt: System prompt to provide context to the model.
        tool_choice: Selection strategy for tool invocation.
        system_prompt_content: System prompt content blocks to provide context to the model.
        **kwargs: Additional keyword arguments for future extensibility.

    Yields:
        Model events.

    Raises:
        ContextWindowOverflowException: If the input exceeds the model's context window.
        ModelThrottledException: If the model service is throttling requests.
    """

    def callback(event: StreamEvent | None = None) -> None:
        # Bridge from the worker thread to the event loop's queue; a None
        # event is the end-of-stream sentinel posted by _stream.
        loop.call_soon_threadsafe(queue.put_nowait, event)
        if event is None:
            return

    loop = asyncio.get_event_loop()
    queue: asyncio.Queue[StreamEvent | None] = asyncio.Queue()

    # Handle backward compatibility: if system_prompt is provided but system_prompt_content is None
    if system_prompt and system_prompt_content is None:
        system_prompt_content = [{"text": system_prompt}]

    # Run the blocking Bedrock call in a worker thread so the event loop
    # stays responsive; events flow back through the queue.
    thread = asyncio.to_thread(self._stream, callback, messages, tool_specs, system_prompt_content, tool_choice)
    task = asyncio.create_task(thread)

    while True:
        event = await queue.get()
        if event is None:
            break
        yield event

    # Await the worker so any exception it raised propagates to the caller.
    await task
def _stream(
    self,
    callback: Callable[..., None],
    messages: Messages,
    tool_specs: list[ToolSpec] | None = None,
    system_prompt_content: list[SystemContentBlock] | None = None,
    tool_choice: ToolChoice | None = None,
) -> None:
    """Stream conversation with the Bedrock model.

    This method operates in a separate thread to avoid blocking the async event loop with the call to
    Bedrock's converse_stream.

    Args:
        callback: Function to send events to the main thread. Called with no
            argument to signal end-of-stream (always, via the finally block).
        messages: List of message objects to be processed by the model.
        tool_specs: List of tool specifications to make available to the model.
        system_prompt_content: System prompt content blocks to provide context to the model.
        tool_choice: Selection strategy for tool invocation.

    Raises:
        ContextWindowOverflowException: If the input exceeds the model's context window.
        ModelThrottledException: If the model service is throttling requests.
    """
    try:
        logger.debug("formatting request")
        request = self._format_request(messages, tool_specs, system_prompt_content, tool_choice)
        logger.debug("request=<%s>", request)

        logger.debug("invoking model")
        streaming = self.config.get("streaming", True)

        if streaming:
            response = self.client.converse_stream(**request)
            # Log only after the call actually returned (previously this was
            # logged before the client was invoked, which was misleading).
            logger.debug("got response from model")

            # Track tool use events to fix stopReason for streaming responses
            has_tool_use = False
            for chunk in response["stream"]:
                if (
                    "metadata" in chunk
                    and "trace" in chunk["metadata"]
                    and "guardrail" in chunk["metadata"]["trace"]
                ):
                    guardrail_data = chunk["metadata"]["trace"]["guardrail"]
                    if self._has_blocked_guardrail(guardrail_data):
                        for event in self._generate_redaction_events():
                            callback(event)

                # Track if we see tool use events
                if "contentBlockStart" in chunk and chunk["contentBlockStart"].get("start", {}).get("toolUse"):
                    has_tool_use = True

                # Fix stopReason for streaming responses that contain tool use
                if (
                    has_tool_use
                    and "messageStop" in chunk
                    and (message_stop := chunk["messageStop"]).get("stopReason") == "end_turn"
                ):
                    # Create corrected chunk with tool_use stopReason; copy so
                    # the original chunk is not mutated.
                    modified_chunk = chunk.copy()
                    modified_chunk["messageStop"] = message_stop.copy()
                    modified_chunk["messageStop"]["stopReason"] = "tool_use"
                    logger.warning("Override stop reason from end_turn to tool_use")
                    callback(modified_chunk)
                else:
                    callback(chunk)
        else:
            response = self.client.converse(**request)
            logger.debug("got response from model")
            for event in self._convert_non_streaming_to_streaming(response):
                callback(event)

            if (
                "trace" in response
                and "guardrail" in response["trace"]
                and self._has_blocked_guardrail(response["trace"]["guardrail"])
            ):
                for event in self._generate_redaction_events():
                    callback(event)

    except ClientError as e:
        error_message = str(e)
        error_code = e.response["Error"]["Code"]

        if error_code in ("ThrottlingException", "throttlingException"):
            raise ModelThrottledException(error_message) from e

        if any(overflow_message in error_message for overflow_message in BEDROCK_CONTEXT_WINDOW_OVERFLOW_MESSAGES):
            logger.warning("bedrock threw context window overflow error")
            raise ContextWindowOverflowException(e) from e

        region = self.client.meta.region_name

        # Aid in debugging by adding more information
        add_exception_note(e, f"└ Bedrock region: {region}")
        add_exception_note(e, f"└ Model id: {self.config.get('model_id')}")

        if error_code == "AccessDeniedException" and "You don't have access to the model" in error_message:
            add_exception_note(
                e,
                "└ For more information see "
                "https://strandsagents.com/latest/user-guide/concepts/model-providers/amazon-bedrock/#model-access-issue",
            )

        if error_code == "ValidationException" and "with on-demand throughput isn’t supported" in error_message:
            add_exception_note(
                e,
                "└ For more information see "
                "https://strandsagents.com/latest/user-guide/concepts/model-providers/amazon-bedrock/#on-demand-throughput-isnt-supported",
            )

        # Bare raise preserves the original traceback.
        raise

    finally:
        # Always signal end-of-stream so the consumer coroutine can exit its loop.
        callback()
        logger.debug("finished streaming response from model")
def _convert_non_streaming_to_streaming(self, response: dict[str, Any]) -> Iterable[StreamEvent]:
"""Convert a non-streaming response to the streaming format.
Args:
response: The non-streaming response from the Bedrock model.
Returns:
An iterable of response events in the streaming format.
"""
# Yield messageStart event
yield {"messageStart": {"role": response["output"]["message"]["role"]}}
# Process content blocks
for content in cast(list[ContentBlock], response["output"]["message"]["content"]):
# Yield contentBlockStart event if needed
if "toolUse" in content:
yield {
"contentBlockStart": {
"start": {
"toolUse": {
"toolUseId": content["toolUse"]["toolUseId"],
"name": content["toolUse"]["name"],
}
},
}
}
# For tool use, we need to yield the input as a delta
input_value = json.dumps(content["toolUse"]["input"])
yield {"contentBlockDelta": {"delta": {"toolUse": {"input": input_value}}}}
elif "text" in content:
# Then yield the text as a delta
yield {
"contentBlockDelta": {
"delta": {"text": content["text"]},
}
}
elif "reasoningContent" in content:
# Then yield the reasoning content as a delta
yield {
"contentBlockDelta": {
"delta": {"reasoningContent": {"text": content["reasoningContent"]["reasoningText"]["text"]}}
}
}
if "signature" in content["reasoningContent"]["reasoningText"]:
yield {
"contentBlockDelta": {
"delta": {
"reasoningContent": {
"signature": content["reasoningContent"]["reasoningText"]["signature"]
}
}
}
}
elif "citationsContent" in content:
# For non-streaming citations, emit text and metadata deltas in sequence
# to match streaming behavior where they flow naturally
if "content" in content["citationsContent"]:
text_content = "".join([content["text"] for content in content["citationsContent"]["content"]])
yield {
"contentBlockDelta": {"delta": {"text": text_content}},
}
for citation in content["citationsContent"]["citations"]:
# Then emit citation metadata (for structure)
citation_metadata: CitationsDelta = {
"title": citation["title"],
"location": citation["location"],
"sourceContent": citation["sourceContent"],
}
yield {"contentBlockDelta": {"delta": {"citation": citation_metadata}}}