diff --git a/tests/unit/vertexai/genai/test_agent_engines.py b/tests/unit/vertexai/genai/test_agent_engines.py index f1b31590b7..a3d9e8fca8 100644 --- a/tests/unit/vertexai/genai/test_agent_engines.py +++ b/tests/unit/vertexai/genai/test_agent_engines.py @@ -2866,6 +2866,151 @@ def test_query_agent_engine(self): None, ) + @mock.patch("google.cloud.storage.Client") + @mock.patch.object(agent_engines.AgentEngines, "_get") + @mock.patch("uuid.uuid4") + def test_run_query_job_agent_engine(self, mock_uuid, get_mock, mock_storage_client): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"name": "projects/123/locations/us-central1/reasoningEngines/456/operations/789"}' + ) + + # Mock the GCS bucket and blob so we don't actually try to use GCS + mock_bucket = mock.Mock() + mock_bucket.exists.return_value = False + mock_blob = mock.Mock() + mock_blob.exists.return_value = False + mock_bucket.blob.return_value = mock_blob + mock_storage_client.return_value.bucket.return_value = mock_bucket + + # mock uuid + mock_uuid.return_value.hex = "b92b9b89-4585-4146-8ee5-22fe99802a8e" + + # Mock _get to return a dummy resource + get_mock.return_value = _genai_types.ReasoningEngine( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + spec=_genai_types.ReasoningEngineSpec( + deployment_spec=_genai_types.ReasoningEngineSpecDeploymentSpec( + env=[_genai_types.EnvVar(name="input_gcs_uri", value="")] + ) + ), + ) + + result = self.client.agent_engines.run_query_job( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + config={ + "query": _TEST_QUERY_PROMPT, + "gcs_bucket": "gs://my-input-bucket/", + }, + ) + + # Verify bucket creation + assert mock_bucket.create.call_count == 1 + # Verify file upload + mock_blob.upload_from_string.assert_called_once_with(_TEST_QUERY_PROMPT) + + assert result == _genai_types.RunQueryJobResult( + 
job_name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + input_gcs_uri="gs://my-input-bucket/input_b92b9b89-4585-4146-8ee5-22fe99802a8e.json", + output_gcs_uri="gs://my-input-bucket/output_b92b9b89-4585-4146-8ee5-22fe99802a8e.json", + ) + + request_mock.assert_called_with( + "post", + f"{_TEST_AGENT_ENGINE_RESOURCE_NAME}:asyncQuery", + { + "_url": {"name": _TEST_AGENT_ENGINE_RESOURCE_NAME}, + "inputGcsUri": "gs://my-input-bucket/input_b92b9b89-4585-4146-8ee5-22fe99802a8e.json", + "outputGcsUri": "gs://my-input-bucket/output_b92b9b89-4585-4146-8ee5-22fe99802a8e.json", + }, + None, + ) + + def test_run_query_job_agent_engine_missing_query(self): + with pytest.raises( + ValueError, match="`query` is required in the config object." + ): + self.client.agent_engines.run_query_job( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + config={"gcs_bucket": "gs://my-input-bucket/"}, + ) + + def test_run_query_job_agent_engine_missing_bucket(self): + with pytest.raises( + ValueError, match="`gcs_bucket` is required in the config object." 
+ ): + self.client.agent_engines.run_query_job( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + config={"query": _TEST_QUERY_PROMPT}, + ) + + @mock.patch.object(agent_engines.AgentEngines, "_get") + def test_run_query_job_agent_engine_missing_cloud_run_job(self, get_mock): + get_mock.return_value = _genai_types.ReasoningEngine( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + spec=_genai_types.ReasoningEngineSpec( + deployment_spec=_genai_types.ReasoningEngineSpecDeploymentSpec(env=[]) + ), + ) + with pytest.raises( + ValueError, + match="Your ReasoningEngine does not support long running queries, please update your ReasoningEngine and try again.", + ): + self.client.agent_engines.run_query_job( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + config={ + "query": _TEST_QUERY_PROMPT, + "gcs_bucket": "gs://my-input-bucket/", + }, + ) + + @mock.patch("google.cloud.storage.Client") + @mock.patch.object(agent_engines.AgentEngines, "_get") + @mock.patch("uuid.uuid4") + def test_run_query_job_agent_engine_bucket_creation_forbidden( + self, mock_uuid, get_mock, mock_storage_client + ): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"name": "projects/123/locations/us-central1/reasoningEngines/456/operations/789"}' + ) + + from google.api_core import exceptions as api_core_exceptions + + mock_bucket = mock.Mock() + mock_bucket.exists.side_effect = api_core_exceptions.Forbidden( + "403 GET Bucket" + ) + mock_blob = mock.Mock() + mock_bucket.blob.return_value = mock_blob + mock_storage_client.return_value.bucket.return_value = mock_bucket + + mock_uuid.return_value.hex = "b92b9b89-4585-4146-8ee5-22fe99802a8e" + + get_mock.return_value = _genai_types.ReasoningEngine( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + spec=_genai_types.ReasoningEngineSpec( + deployment_spec=_genai_types.ReasoningEngineSpecDeploymentSpec( + env=[_genai_types.EnvVar(name="input_gcs_uri", value="")] + ) + ), 
+ ) + + with pytest.raises( + ValueError, match="Permission denied to check existence of bucket" + ): + self.client.agent_engines.run_query_job( + name=_TEST_AGENT_ENGINE_RESOURCE_NAME, + config={ + "query": _TEST_QUERY_PROMPT, + "gcs_bucket": "gs://my-input-bucket/", + }, + ) + def test_query_agent_engine_async(self): agent = self.client.agent_engines._register_api_methods( agent_engine=_genai_types.AgentEngine( @@ -2898,6 +3043,120 @@ def test_query_agent_engine_async(self): None, ) + def test_check_query_job_agent_engine(self): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"done": true, "metadata": {"output_gcs_uri": "gs://my-output-bucket/output.json", "input_gcs_uri": "gs://my-input-bucket/input.json"}}' + ) + with mock.patch("google.cloud.storage.Client") as mock_storage_client: + mock_bucket = mock.Mock() + mock_blob = mock.Mock() + mock_blob.exists.return_value = True + mock_blob.download_as_string.return_value = b'{"success": true}' + mock_bucket.blob.return_value = mock_blob + mock_storage_client.return_value.bucket.return_value = mock_bucket + + result = self.client.agent_engines.check_query_job( + name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + config={"retrieve_result": True}, + ) + + assert result == _genai_types.CheckQueryJobResult( + operation_name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + status="SUCCESS", + input_gcs_uri="gs://my-input-bucket/input.json", + output_gcs_uri="gs://my-output-bucket/output.json", + result='{"success": true}', + ) + + def test_check_query_job_agent_engine_running(self): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"done": false, "metadata": {"output_gcs_uri": "gs://my-output-bucket/output.json", "input_gcs_uri": 
"gs://my-input-bucket/input.json"}}' + ) + + result = self.client.agent_engines.check_query_job( + name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + config={"retrieve_result": True}, + ) + + assert result == _genai_types.CheckQueryJobResult( + operation_name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + status="RUNNING", + input_gcs_uri="gs://my-input-bucket/input.json", + output_gcs_uri="gs://my-output-bucket/output.json", + result=None, + ) + + def test_check_query_job_agent_engine_failed(self): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"done": true, "error": {"message": "Job failed with errors."}}' + ) + + result = self.client.agent_engines.check_query_job( + name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + config={"retrieve_result": True}, + ) + + assert result == _genai_types.CheckQueryJobResult( + operation_name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + status="FAILED", + input_gcs_uri=None, + output_gcs_uri=None, + result="{'message': 'Job failed with errors.'}", + ) + + def test_check_query_job_agent_engine_no_retrieve(self): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"done": true, "metadata": {"output_gcs_uri": "gs://my-output-bucket/output.json", "input_gcs_uri": "gs://my-input-bucket/input.json"}}' + ) + + result = self.client.agent_engines.check_query_job( + name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + config={"retrieve_result": False}, + ) + + assert result == _genai_types.CheckQueryJobResult( + operation_name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + status="SUCCESS", + input_gcs_uri="gs://my-input-bucket/input.json", + 
output_gcs_uri="gs://my-output-bucket/output.json", + result=None, + ) + + def test_check_query_job_agent_engine_blob_not_exists(self): + with mock.patch.object( + self.client.agent_engines._api_client, "request" + ) as request_mock: + request_mock.return_value = genai_types.HttpResponse( + body='{"done": true, "metadata": {"output_gcs_uri": "gs://my-output-bucket/output.json", "input_gcs_uri": "gs://my-input-bucket/input.json"}}' + ) + with mock.patch("google.cloud.storage.Client") as mock_storage_client: + mock_bucket = mock.Mock() + mock_blob = mock.Mock() + mock_blob.exists.return_value = False + mock_bucket.blob.return_value = mock_blob + mock_storage_client.return_value.bucket.return_value = mock_bucket + + with pytest.raises( + ValueError, + match="Failed to retrieve blob results for gs://my-output-bucket/output.json", + ): + self.client.agent_engines.check_query_job( + name="projects/123/locations/us-central1/reasoningEngines/456/operations/789", + config={"retrieve_result": True}, + ) + def test_query_agent_engine_stream(self): with mock.patch.object( self.client.agent_engines._api_client, "request_streamed" diff --git a/vertexai/_genai/agent_engines.py b/vertexai/_genai/agent_engines.py index 819ad034d9..4e2a2810cc 100644 --- a/vertexai/_genai/agent_engines.py +++ b/vertexai/_genai/agent_engines.py @@ -49,6 +49,62 @@ logger.setLevel(logging.INFO) +def _CheckQueryJobAgentEngineConfig_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + + if getv(from_object, ["retrieve_result"]) is not None: + setv(parent_object, ["retrieveResult"], getv(from_object, ["retrieve_result"])) + + return to_object + + +def _CheckQueryJobAgentEngineRequestParameters_to_vertex( + from_object: Union[dict[str, Any], object], + parent_object: Optional[dict[str, Any]] = None, +) -> dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: 
+        setv(to_object, ["_url", "name"], getv(from_object, ["name"]))
+
+    if getv(from_object, ["config"]) is not None:
+        setv(
+            to_object,
+            ["config"],
+            _CheckQueryJobAgentEngineConfig_to_vertex(
+                getv(from_object, ["config"]), to_object
+            ),
+        )
+
+    return to_object
+
+
+def _CheckQueryJobResult_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+    to_object: dict[str, Any] = {}
+
+    if getv(from_object, ["operationName"]) is not None:
+        setv(to_object, ["operation_name"], getv(from_object, ["operationName"]))
+
+    if getv(from_object, ["inputGcsUri"]) is not None:
+        setv(to_object, ["input_gcs_uri"], getv(from_object, ["inputGcsUri"]))
+
+    if getv(from_object, ["outputGcsUri"]) is not None:
+        setv(to_object, ["output_gcs_uri"], getv(from_object, ["outputGcsUri"]))
+
+    if getv(from_object, ["status"]) is not None:
+        setv(to_object, ["status"], getv(from_object, ["status"]))
+
+    if getv(from_object, ["result"]) is not None:
+        setv(to_object, ["result"], getv(from_object, ["result"]))
+
+    return to_object
+
+
 def _CreateAgentEngineConfig_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -246,6 +302,41 @@ def _QueryAgentEngineRequestParameters_to_vertex(
     return to_object
 
 
+def _RunQueryJobAgentEngineConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+    to_object: dict[str, Any] = {}
+
+    if getv(from_object, ["input_gcs_uri"]) is not None:
+        setv(parent_object, ["inputGcsUri"], getv(from_object, ["input_gcs_uri"]))
+
+    if getv(from_object, ["output_gcs_uri"]) is not None:
+        setv(parent_object, ["outputGcsUri"], getv(from_object, ["output_gcs_uri"]))
+
+    return to_object
+
+
+def _RunQueryJobAgentEngineRequestParameters_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> 
dict[str, Any]: + to_object: dict[str, Any] = {} + if getv(from_object, ["name"]) is not None: + setv(to_object, ["_url", "name"], getv(from_object, ["name"])) + + if getv(from_object, ["config"]) is not None: + setv( + to_object, + ["config"], + _RunQueryJobAgentEngineConfig_to_vertex( + getv(from_object, ["config"]), to_object + ), + ) + + return to_object + + def _UpdateAgentEngineConfig_to_vertex( from_object: Union[dict[str, Any], object], parent_object: Optional[dict[str, Any]] = None, @@ -337,6 +428,119 @@ def _UpdateAgentEngineRequestParameters_to_vertex( class AgentEngines(_api_module.BaseModule): + def _check_query_job( + self, + *, + name: str, + config: Optional[types.CheckQueryJobAgentEngineConfigOrDict] = None, + ) -> types.CheckQueryJobResult: + """ + Query an Agent Engine asynchronously. + """ + + parameter_model = types._CheckQueryJobAgentEngineRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _CheckQueryJobAgentEngineRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:checkQueryJob".format_map(request_url_dict) + else: + path = "{name}:checkQueryJob" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("post", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + if self._api_client.vertexai: + response_dict = _CheckQueryJobResult_from_vertex(response_dict) + + return_value = types.CheckQueryJobResult._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + def _run_query_job( + self, + *, + name: str, + config: Optional[types._RunQueryJobAgentEngineConfigOrDict] = None, + ) -> types.AgentEngineOperation: + """ + Run a query job on an agent engine. + """ + + parameter_model = types._RunQueryJobAgentEngineRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _RunQueryJobAgentEngineRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:asyncQuery".format_map(request_url_dict) + else: + path = "{name}:asyncQuery" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = self._api_client.request("post", path, request_dict, http_options) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.AgentEngineOperation._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + def _create( self, *, config: Optional[types.CreateAgentEngineConfigOrDict] = None ) -> types.AgentEngineOperation: @@ -783,6 +987,116 @@ def _list_pager( config, ) + def check_query_job( + self, + *, + name: str, + config: Optional[types.CheckQueryJobAgentEngineConfigOrDict] = None, + ) -> types.CheckQueryJobResult: + """Checks a query job on an agent engine and optionally returns the results. + + Args: + name (str): + Required. A fully-qualified resource name or ID. + config (CheckQueryJobAgentEngineConfigOrDict): + Optional. The configuration for the async query. If not provided, + the default configuration will be used. This can be used to specify + the following fields: + - retrieve_result: Whether to retrieve the results of the query job. 
+        """
+        from google.cloud import storage  # type: ignore[attr-defined]
+
+        if config is None:
+            config = types.CheckQueryJobAgentEngineConfig()
+        elif isinstance(config, dict):
+            config = types.CheckQueryJobAgentEngineConfig(**config)
+
+        try:
+            operation = self._get_agent_operation(operation_name=name)
+        except Exception as e:
+            raise ValueError(f"Failed to retrieve operation {name}: {e}") from e
+
+        status = "RUNNING"
+        if operation.done:
+            status = "FAILED" if operation.error else "SUCCESS"
+
+        output_gcs_uri = None
+        input_gcs_uri = None
+
+        logger.debug("CheckQueryJob operation=%s", operation)
+        logger.debug("CheckQueryJob status=%s", status)
+        logger.debug("CheckQueryJob metadata type: %s", type(operation.metadata))
+        logger.debug(
+            "CheckQueryJob metadata dir: %s", dir(operation.metadata) if operation.metadata else None
+        )
+
+        if operation.metadata:
+            if isinstance(operation.metadata, dict):
+                output_gcs_uri = operation.metadata.get("output_gcs_uri")
+                input_gcs_uri = operation.metadata.get("input_gcs_uri")
+                logger.debug("CheckQueryJob dict metadata: %s", operation.metadata)
+            else:
+                output_gcs_uri = getattr(operation.metadata, "output_gcs_uri", None)
+                input_gcs_uri = getattr(operation.metadata, "input_gcs_uri", None)
+                logger.debug(
+                    "CheckQueryJob object metadata: output_gcs_uri=%s, input_gcs_uri=%s", output_gcs_uri, input_gcs_uri
+                )
+
+        logger.debug("CheckQueryJob response type: %s", type(operation.response))
+        logger.debug(
+            "CheckQueryJob response dir: %s", dir(operation.response) if operation.response else None
+        )
+
+        if not output_gcs_uri and operation.response:
+            if isinstance(operation.response, dict):
+                output_gcs_uri = output_gcs_uri or operation.response.get(
+                    "output_gcs_uri"
+                )
+                input_gcs_uri = input_gcs_uri or operation.response.get("input_gcs_uri")
+                logger.debug("CheckQueryJob dict response: %s", operation.response)
+            else:
+                output_gcs_uri = output_gcs_uri or getattr(
+                    operation.response, "output_gcs_uri", None
+                )
+                input_gcs_uri = input_gcs_uri or getattr(
operation.response, "input_gcs_uri", None + ) + print( + f"DEBUG CheckQueryJob object response: output_gcs_uri={output_gcs_uri}, input_gcs_uri={input_gcs_uri}" + ) + + print( + f"DEBUG CheckQueryJob resolved URIs: input={input_gcs_uri}, output={output_gcs_uri}" + ) + + result_str = None + if status == "SUCCESS" and config.retrieve_result and output_gcs_uri: + storage_client = storage.Client( + project=self._api_client.project, + credentials=self._api_client._credentials, + ) + bucket_name = output_gcs_uri.replace("gs://", "").split("/")[0] + blob_name = output_gcs_uri.replace(f"gs://{bucket_name}/", "") + bucket = storage_client.bucket(bucket_name) + blob = bucket.blob(blob_name) + if blob.exists(): + result_str = blob.download_as_string().decode("utf-8") + else: + raise ValueError( + f"Failed to retrieve blob results for {output_gcs_uri}" + ) + + elif status == "FAILED" and operation.error: + result_str = str(operation.error) + + return types.CheckQueryJobResult( + operation_name=name, + input_gcs_uri=input_gcs_uri, + output_gcs_uri=output_gcs_uri, + status=status, + result=result_str, + ) + def _is_lightweight_creation( self, agent: Any, config: types.AgentEngineConfig ) -> bool: @@ -795,6 +1109,112 @@ def _is_lightweight_creation( return False return True + def run_query_job( + self, + *, + name: str, + config: Optional[types.RunQueryJobAgentEngineConfigOrDict] = None, + ) -> types.RunQueryJobResult: + """Runs a query job on an agent engine asynchronously. + + Args: + name (str): + Required. A fully-qualified resource name or ID. + config (RunQueryJobAgentEngineConfigOrDict): + Optional. The configuration for the async query. If not provided, + the default configuration will be used. This can be used to specify + the following fields: + - query: The query to send to the agent engine. + - gcs_bucket: The GCS bucket to use for the query. 
+        """
+        from google.cloud import storage  # type: ignore[attr-defined]
+        from google.api_core import exceptions
+        import uuid
+
+        if config is None:
+            config = types.RunQueryJobAgentEngineConfig()
+        elif isinstance(config, dict):
+            config = types.RunQueryJobAgentEngineConfig(**config)
+
+        if not config.query:
+            raise ValueError("`query` is required in the config object.")
+        if not config.gcs_bucket:
+            raise ValueError("`gcs_bucket` is required in the config object.")
+
+        api_resource = self._get(name=name)
+
+        is_supported = False
+        logger.debug("run_query_job api_resource=%s", api_resource)
+        for env in api_resource.spec.deployment_spec.env:
+            if env.name in [
+                "INPUT_GCS_URI",
+                "OUTPUT_GCS_URI",
+                "input_gcs_uri",
+                "output_gcs_uri",
+            ]:
+                is_supported = True
+                break
+
+        if not is_supported:
+            raise ValueError(
+                "Your ReasoningEngine does not support long running queries, "
+                "please update your ReasoningEngine and try again."
+            )
+
+        gcs_bucket = config.gcs_bucket.rstrip("/")
+
+        storage_client = storage.Client(
+            project=self._api_client.project, credentials=self._api_client._credentials
+        )
+
+        # Handle creating the bucket if it does not exist
+        bucket_name = gcs_bucket.replace("gs://", "").split("/")[0]
+        bucket = storage_client.bucket(bucket_name)
+
+        try:
+            logger.debug("run_query_job checking bucket=%s", bucket)
+            bucket_exists = bucket.exists()
+        except exceptions.Forbidden as e:
+            raise ValueError(
+                f"Permission denied to check existence of bucket '{bucket_name}'. "
+                "The service account may lack 'storage.buckets.get' permission."
+            ) from e
+
+        if not bucket_exists:
+            try:
+                logger.debug("run_query_job creating bucket=%s", bucket)
+                bucket.create()
+            except exceptions.Forbidden as e:
+                raise ValueError(
+                    f"Permission denied to create bucket '{bucket_name}'. "
+                    "The service account may lack 'storage.buckets.create' permission."
+ ) from e + + job_uuid = uuid.uuid4().hex + input_blob_name = f"input_{job_uuid}.json" + input_gcs_uri = f"{gcs_bucket}/{input_blob_name}" + blob = bucket.blob(input_blob_name) + print(f"DEBUG uploading to bucket: bucket={blob}") + blob.upload_from_string(config.query) + + output_blob_name = f"output_{job_uuid}.json" + output_gcs_uri = f"{gcs_bucket}/{output_blob_name}" + + new_config = types._RunQueryJobAgentEngineConfig( + input_gcs_uri=input_gcs_uri, + output_gcs_uri=output_gcs_uri, + ) + + print(f"DEBUG triggering _run_query_job with config: {new_config}") + # Proceed with sending the async query via the auto-generated method + operation = self._run_query_job(name=name, config=new_config) + + return types.RunQueryJobResult( + job_name=operation.name, + input_gcs_uri=input_gcs_uri, + output_gcs_uri=output_gcs_uri, + ) + def get( self, *, @@ -2055,6 +2475,123 @@ def list_session_events( class AsyncAgentEngines(_api_module.BaseModule): + async def _check_query_job( + self, + *, + name: str, + config: Optional[types.CheckQueryJobAgentEngineConfigOrDict] = None, + ) -> types.CheckQueryJobResult: + """ + Query an Agent Engine asynchronously. + """ + + parameter_model = types._CheckQueryJobAgentEngineRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _CheckQueryJobAgentEngineRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:checkQueryJob".format_map(request_url_dict) + else: + path = "{name}:checkQueryJob" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "post", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + if self._api_client.vertexai: + response_dict = _CheckQueryJobResult_from_vertex(response_dict) + + return_value = types.CheckQueryJobResult._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + + async def _run_query_job( + self, + *, + name: str, + config: Optional[types._RunQueryJobAgentEngineConfigOrDict] = None, + ) -> types.AgentEngineOperation: + """ + Run a query job on an agent engine. + """ + + parameter_model = types._RunQueryJobAgentEngineRequestParameters( + name=name, + config=config, + ) + + request_url_dict: Optional[dict[str, str]] + if not self._api_client.vertexai: + raise ValueError("This method is only supported in the Vertex AI client.") + else: + request_dict = _RunQueryJobAgentEngineRequestParameters_to_vertex( + parameter_model + ) + request_url_dict = request_dict.get("_url") + if request_url_dict: + path = "{name}:asyncQuery".format_map(request_url_dict) + else: + path = "{name}:asyncQuery" + + query_params = request_dict.get("_query") + if query_params: + path = f"{path}?{urlencode(query_params)}" + # TODO: remove the hack that pops config. 
+ request_dict.pop("config", None) + + http_options: Optional[types.HttpOptions] = None + if ( + parameter_model.config is not None + and parameter_model.config.http_options is not None + ): + http_options = parameter_model.config.http_options + + request_dict = _common.convert_to_dict(request_dict) + request_dict = _common.encode_unserializable_types(request_dict) + + response = await self._api_client.async_request( + "post", path, request_dict, http_options + ) + + response_dict = {} if not response.body else json.loads(response.body) + + return_value = types.AgentEngineOperation._from_response( + response=response_dict, kwargs=parameter_model.model_dump() + ) + + self._api_client._verify_response(return_value) + return return_value + async def _create( self, *, config: Optional[types.CreateAgentEngineConfigOrDict] = None ) -> types.AgentEngineOperation: diff --git a/vertexai/_genai/types/__init__.py b/vertexai/_genai/types/__init__.py index edbf8caec6..d69c0b3cd0 100644 --- a/vertexai/_genai/types/__init__.py +++ b/vertexai/_genai/types/__init__.py @@ -26,6 +26,7 @@ from .common import _AppendAgentEngineTaskEventRequestParameters from .common import _AssembleDatasetParameters from .common import _AssessDatasetParameters +from .common import _CheckQueryJobAgentEngineRequestParameters from .common import _CreateAgentEngineMemoryRequestParameters from .common import _CreateAgentEngineRequestParameters from .common import _CreateAgentEngineSandboxRequestParameters @@ -90,6 +91,10 @@ from .common import _RestoreVersionRequestParameters from .common import _RetrieveAgentEngineMemoriesRequestParameters from .common import _RollbackAgentEngineMemoryRequestParameters +from .common import _RunQueryJobAgentEngineConfig +from .common import _RunQueryJobAgentEngineConfigDict +from .common import _RunQueryJobAgentEngineConfigOrDict +from .common import _RunQueryJobAgentEngineRequestParameters from .common import _UpdateAgentEngineMemoryRequestParameters from .common import 
_UpdateAgentEngineRequestParameters from .common import _UpdateAgentEngineSessionRequestParameters @@ -196,6 +201,12 @@ from .common import CandidateResponseOrDict from .common import CandidateResult from .common import CandidateResultDict +from .common import CheckQueryJobAgentEngineConfig +from .common import CheckQueryJobAgentEngineConfigDict +from .common import CheckQueryJobAgentEngineConfigOrDict +from .common import CheckQueryJobResult +from .common import CheckQueryJobResultDict +from .common import CheckQueryJobResultOrDict from .common import Chunk from .common import ChunkDict from .common import ChunkOrDict @@ -941,6 +952,12 @@ from .common import RubricGroupOrDict from .common import RubricVerdict from .common import RubricVerdictDict +from .common import RunQueryJobAgentEngineConfig +from .common import RunQueryJobAgentEngineConfigDict +from .common import RunQueryJobAgentEngineConfigOrDict +from .common import RunQueryJobResult +from .common import RunQueryJobResultDict +from .common import RunQueryJobResultOrDict from .common import SamplingConfig from .common import SamplingConfigDict from .common import SamplingConfigOrDict @@ -1646,45 +1663,15 @@ "VertexBaseConfig", "VertexBaseConfigDict", "VertexBaseConfigOrDict", - "SecretRef", - "SecretRefDict", - "SecretRefOrDict", - "SecretEnvVar", - "SecretEnvVarDict", - "SecretEnvVarOrDict", - "ReasoningEngineSpecDeploymentSpec", - "ReasoningEngineSpecDeploymentSpecDict", - "ReasoningEngineSpecDeploymentSpecOrDict", - "ReasoningEngineSpecPackageSpec", - "ReasoningEngineSpecPackageSpecDict", - "ReasoningEngineSpecPackageSpecOrDict", - "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig", - "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict", - "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigOrDict", - "ReasoningEngineSpecSourceCodeSpecInlineSource", - "ReasoningEngineSpecSourceCodeSpecInlineSourceDict", - "ReasoningEngineSpecSourceCodeSpecInlineSourceOrDict", - 
"ReasoningEngineSpecSourceCodeSpecAgentConfigSource", - "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict", - "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceOrDict", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigOrDict", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict", - "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceOrDict", - "ReasoningEngineSpecSourceCodeSpecImageSpec", - "ReasoningEngineSpecSourceCodeSpecImageSpecDict", - "ReasoningEngineSpecSourceCodeSpecImageSpecOrDict", - "ReasoningEngineSpecSourceCodeSpecPythonSpec", - "ReasoningEngineSpecSourceCodeSpecPythonSpecDict", - "ReasoningEngineSpecSourceCodeSpecPythonSpecOrDict", - "ReasoningEngineSpecSourceCodeSpec", - "ReasoningEngineSpecSourceCodeSpecDict", - "ReasoningEngineSpecSourceCodeSpecOrDict", - "ReasoningEngineSpec", - "ReasoningEngineSpecDict", - "ReasoningEngineSpecOrDict", + "CheckQueryJobAgentEngineConfig", + "CheckQueryJobAgentEngineConfigDict", + "CheckQueryJobAgentEngineConfigOrDict", + "CheckQueryJobResult", + "CheckQueryJobResultDict", + "CheckQueryJobResultOrDict", + "_RunQueryJobAgentEngineConfig", + "_RunQueryJobAgentEngineConfigDict", + "_RunQueryJobAgentEngineConfigOrDict", "MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent", "MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict", "MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventOrDict", @@ -1730,15 +1717,54 @@ "ReasoningEngineContextSpec", "ReasoningEngineContextSpecDict", "ReasoningEngineContextSpecOrDict", - "CreateAgentEngineConfig", - "CreateAgentEngineConfigDict", - "CreateAgentEngineConfigOrDict", + "SecretRef", + "SecretRefDict", + "SecretRefOrDict", + "SecretEnvVar", + "SecretEnvVarDict", + "SecretEnvVarOrDict", + 
"ReasoningEngineSpecDeploymentSpec", + "ReasoningEngineSpecDeploymentSpecDict", + "ReasoningEngineSpecDeploymentSpecOrDict", + "ReasoningEngineSpecPackageSpec", + "ReasoningEngineSpecPackageSpecDict", + "ReasoningEngineSpecPackageSpecOrDict", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigOrDict", + "ReasoningEngineSpecSourceCodeSpecInlineSource", + "ReasoningEngineSpecSourceCodeSpecInlineSourceDict", + "ReasoningEngineSpecSourceCodeSpecInlineSourceOrDict", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSource", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict", + "ReasoningEngineSpecSourceCodeSpecAgentConfigSourceOrDict", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigOrDict", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict", + "ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceOrDict", + "ReasoningEngineSpecSourceCodeSpecImageSpec", + "ReasoningEngineSpecSourceCodeSpecImageSpecDict", + "ReasoningEngineSpecSourceCodeSpecImageSpecOrDict", + "ReasoningEngineSpecSourceCodeSpecPythonSpec", + "ReasoningEngineSpecSourceCodeSpecPythonSpecDict", + "ReasoningEngineSpecSourceCodeSpecPythonSpecOrDict", + "ReasoningEngineSpecSourceCodeSpec", + "ReasoningEngineSpecSourceCodeSpecDict", + "ReasoningEngineSpecSourceCodeSpecOrDict", + "ReasoningEngineSpec", + "ReasoningEngineSpecDict", + "ReasoningEngineSpecOrDict", "ReasoningEngine", "ReasoningEngineDict", "ReasoningEngineOrDict", "AgentEngineOperation", "AgentEngineOperationDict", "AgentEngineOperationOrDict", + "CreateAgentEngineConfig", + "CreateAgentEngineConfigDict", + "CreateAgentEngineConfigOrDict", "DeleteAgentEngineConfig", 
"DeleteAgentEngineConfigDict", "DeleteAgentEngineConfigOrDict", @@ -2174,6 +2200,12 @@ "AgentEngineConfig", "AgentEngineConfigDict", "AgentEngineConfigOrDict", + "RunQueryJobAgentEngineConfig", + "RunQueryJobAgentEngineConfigDict", + "RunQueryJobAgentEngineConfigOrDict", + "RunQueryJobResult", + "RunQueryJobResultDict", + "RunQueryJobResultOrDict", "AssembleDataset", "AssembleDatasetDict", "AssembleDatasetOrDict", @@ -2225,9 +2257,9 @@ "AcceleratorType", "Type", "JobState", + "ManagedTopicEnum", "IdentityType", "AgentServerMode", - "ManagedTopicEnum", "Operator", "MachineConfig", "Framework", @@ -2278,6 +2310,8 @@ "_OptimizeRequestParameters", "_CustomJobParameters", "_GetCustomJobParameters", + "_CheckQueryJobAgentEngineRequestParameters", + "_RunQueryJobAgentEngineRequestParameters", "_CreateAgentEngineRequestParameters", "_DeleteAgentEngineRequestParameters", "_GetAgentEngineRequestParameters", diff --git a/vertexai/_genai/types/common.py b/vertexai/_genai/types/common.py index 4a7512d0fe..8b7f69f00e 100644 --- a/vertexai/_genai/types/common.py +++ b/vertexai/_genai/types/common.py @@ -321,6 +321,21 @@ class JobState(_common.CaseInSensitiveEnum): """The job is partially succeeded, some results may be missing due to errors.""" +class ManagedTopicEnum(_common.CaseInSensitiveEnum): + """The managed memory topic.""" + + MANAGED_TOPIC_ENUM_UNSPECIFIED = "MANAGED_TOPIC_ENUM_UNSPECIFIED" + """Unspecified topic. 
This value should not be used.""" + USER_PERSONAL_INFO = "USER_PERSONAL_INFO" + """Significant personal information about the User like first names, relationships, hobbies, important dates.""" + USER_PREFERENCES = "USER_PREFERENCES" + """Stated or implied likes, dislikes, preferred styles, or patterns.""" + KEY_CONVERSATION_DETAILS = "KEY_CONVERSATION_DETAILS" + """Important milestones or conclusions within the dialogue.""" + EXPLICIT_INSTRUCTIONS = "EXPLICIT_INSTRUCTIONS" + """Information that the user explicitly requested to remember or forget.""" + + class IdentityType(_common.CaseInSensitiveEnum): """The identity type to use for the Reasoning Engine. If not specified, the `service_account` field will be used if set, otherwise the default Vertex AI Reasoning Engine Service Agent in the project will be used.""" @@ -343,21 +358,6 @@ class AgentServerMode(_common.CaseInSensitiveEnum): """Experimental agent server mode. This mode contains experimental features.""" -class ManagedTopicEnum(_common.CaseInSensitiveEnum): - """The managed memory topic.""" - - MANAGED_TOPIC_ENUM_UNSPECIFIED = "MANAGED_TOPIC_ENUM_UNSPECIFIED" - """Unspecified topic. This value should not be used.""" - USER_PERSONAL_INFO = "USER_PERSONAL_INFO" - """Significant personal information about the User like first names, relationships, hobbies, important dates.""" - USER_PREFERENCES = "USER_PREFERENCES" - """Stated or implied likes, dislikes, preferred styles, or patterns.""" - KEY_CONVERSATION_DETAILS = "KEY_CONVERSATION_DETAILS" - """Important milestones or conclusions within the dialogue.""" - EXPLICIT_INSTRUCTIONS = "EXPLICIT_INSTRUCTIONS" - """Information that the user explicitly requested to remember or forget.""" - - class Operator(_common.CaseInSensitiveEnum): """Operator to apply to the filter. 
If not set, then EQUAL will be used.""" @@ -6424,1004 +6424,1289 @@ class _GetCustomJobParametersDict(TypedDict, total=False): ] -class SecretRef(_common.BaseModel): - """Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" +class CheckQueryJobAgentEngineConfig(_common.BaseModel): + """Config for async querying agent engines.""" - secret: Optional[str] = Field( - default=None, - description="""Required. The name of the secret in Cloud Secret Manager. Format: {secret_name}.""", + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" ) - version: Optional[str] = Field( + retrieve_result: Optional[bool] = Field( default=None, - description="""The Cloud Secret Manager secret version. Can be 'latest' for the latest version, an integer for a specific version, or a version alias.""", + description="""Whether to retrieve the results of the query job.""", ) -class SecretRefDict(TypedDict, total=False): - """Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" +class CheckQueryJobAgentEngineConfigDict(TypedDict, total=False): + """Config for async querying agent engines.""" - secret: Optional[str] - """Required. The name of the secret in Cloud Secret Manager. Format: {secret_name}.""" + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" - version: Optional[str] - """The Cloud Secret Manager secret version. 
Can be 'latest' for the latest version, an integer for a specific version, or a version alias.""" + retrieve_result: Optional[bool] + """Whether to retrieve the results of the query job.""" -SecretRefOrDict = Union[SecretRef, SecretRefDict] +CheckQueryJobAgentEngineConfigOrDict = Union[ + CheckQueryJobAgentEngineConfig, CheckQueryJobAgentEngineConfigDict +] -class SecretEnvVar(_common.BaseModel): - """Represents an environment variable where the value is a secret in Cloud Secret Manager.""" +class _CheckQueryJobAgentEngineRequestParameters(_common.BaseModel): + """Parameters for async querying agent engines.""" - name: Optional[str] = Field( - default=None, - description="""Required. Name of the secret environment variable.""", - ) - secret_ref: Optional[SecretRef] = Field( - default=None, - description="""Required. Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""", + name: Optional[str] = Field(default=None, description="""Name of the query job.""") + config: Optional[CheckQueryJobAgentEngineConfig] = Field( + default=None, description="""""" ) -class SecretEnvVarDict(TypedDict, total=False): - """Represents an environment variable where the value is a secret in Cloud Secret Manager.""" +class _CheckQueryJobAgentEngineRequestParametersDict(TypedDict, total=False): + """Parameters for async querying agent engines.""" name: Optional[str] - """Required. Name of the secret environment variable.""" + """Name of the query job.""" - secret_ref: Optional[SecretRefDict] - """Required. 
Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" + config: Optional[CheckQueryJobAgentEngineConfigDict] + """""" -SecretEnvVarOrDict = Union[SecretEnvVar, SecretEnvVarDict] +_CheckQueryJobAgentEngineRequestParametersOrDict = Union[ + _CheckQueryJobAgentEngineRequestParameters, + _CheckQueryJobAgentEngineRequestParametersDict, +] -class ReasoningEngineSpecDeploymentSpec(_common.BaseModel): - """The specification of a Reasoning Engine deployment.""" +class CheckQueryJobResult(_common.BaseModel): + """Result of checking a query job.""" - agent_server_mode: Optional[AgentServerMode] = Field( - default=None, description="""The agent server mode.""" - ) - container_concurrency: Optional[int] = Field( - default=None, - description="""Optional. Concurrency for each container and agent server. Recommended value: 2 * cpu + 1. Defaults to 9.""", - ) - env: Optional[list[EnvVar]] = Field( - default=None, - description="""Optional. Environment variables to be set with the Reasoning Engine deployment. The environment variables can be updated through the UpdateReasoningEngine API.""", + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" ) - max_instances: Optional[int] = Field( - default=None, - description="""Optional. The maximum number of application instances that can be launched to handle increased traffic. Defaults to 100. Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable range is [1, 100].""", + operation_name: Optional[str] = Field( + default=None, description="""Name of the agent engine operation.""" ) - min_instances: Optional[int] = Field( - default=None, - description="""Optional. The minimum number of application instances that will be kept running at all times. Defaults to 1. 
Range: [0, 10].""", + input_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the input file.""" ) - psc_interface_config: Optional[PscInterfaceConfig] = Field( - default=None, description="""Optional. Configuration for PSC-I.""" + output_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the output file.""" ) - resource_limits: Optional[dict[str, str]] = Field( - default=None, - description="""Optional. Resource limits for each container. Only 'cpu' and 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. * The only supported values for CPU are '1', '2', '4', '6' and '8'. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * The only supported values for memory are '1Gi', '2Gi', ... '32 Gi'. * For required cpu on different memory values, go to https://cloud.google.com/run/docs/configuring/memory-limits""", + status: Optional[str] = Field( + default=None, description="""Status of the operation.""" ) - secret_env: Optional[list[SecretEnvVar]] = Field( - default=None, - description="""Optional. Environment variables where the value is a secret in Cloud Secret Manager. To use this feature, add 'Secret Manager Secret Accessor' role (roles/secretmanager.secretAccessor) to AI Platform Reasoning Engine Service Agent.""", + result: Optional[str] = Field( + default=None, description="""JSON result of the operation.""" ) -class ReasoningEngineSpecDeploymentSpecDict(TypedDict, total=False): - """The specification of a Reasoning Engine deployment.""" - - agent_server_mode: Optional[AgentServerMode] - """The agent server mode.""" - - container_concurrency: Optional[int] - """Optional. Concurrency for each container and agent server. Recommended value: 2 * cpu + 1. Defaults to 9.""" +class CheckQueryJobResultDict(TypedDict, total=False): + """Result of checking a query job.""" - env: Optional[list[EnvVarDict]] - """Optional. 
Environment variables to be set with the Reasoning Engine deployment. The environment variables can be updated through the UpdateReasoningEngine API.""" + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" - max_instances: Optional[int] - """Optional. The maximum number of application instances that can be launched to handle increased traffic. Defaults to 100. Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable range is [1, 100].""" + operation_name: Optional[str] + """Name of the agent engine operation.""" - min_instances: Optional[int] - """Optional. The minimum number of application instances that will be kept running at all times. Defaults to 1. Range: [0, 10].""" + input_gcs_uri: Optional[str] + """The GCS URI of the input file.""" - psc_interface_config: Optional[PscInterfaceConfigDict] - """Optional. Configuration for PSC-I.""" + output_gcs_uri: Optional[str] + """The GCS URI of the output file.""" - resource_limits: Optional[dict[str, str]] - """Optional. Resource limits for each container. Only 'cpu' and 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. * The only supported values for CPU are '1', '2', '4', '6' and '8'. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * The only supported values for memory are '1Gi', '2Gi', ... '32 Gi'. * For required cpu on different memory values, go to https://cloud.google.com/run/docs/configuring/memory-limits""" + status: Optional[str] + """Status of the operation.""" - secret_env: Optional[list[SecretEnvVarDict]] - """Optional. Environment variables where the value is a secret in Cloud Secret Manager. 
To use this feature, add 'Secret Manager Secret Accessor' role (roles/secretmanager.secretAccessor) to AI Platform Reasoning Engine Service Agent.""" + result: Optional[str] + """JSON result of the operation.""" -ReasoningEngineSpecDeploymentSpecOrDict = Union[ - ReasoningEngineSpecDeploymentSpec, ReasoningEngineSpecDeploymentSpecDict -] +CheckQueryJobResultOrDict = Union[CheckQueryJobResult, CheckQueryJobResultDict] -class ReasoningEngineSpecPackageSpec(_common.BaseModel): - """User-provided package specification, containing pickled object and package requirements.""" +class _RunQueryJobAgentEngineConfig(_common.BaseModel): + """Config for running a query job on an agent engine.""" - dependency_files_gcs_uri: Optional[str] = Field( - default=None, - description="""Optional. The Cloud Storage URI of the dependency files in tar.gz format.""", - ) - pickle_object_gcs_uri: Optional[str] = Field( - default=None, - description="""Optional. The Cloud Storage URI of the pickled python object.""", + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" ) - python_version: Optional[str] = Field( - default=None, - description="""Optional. The Python version. Supported values are 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, the default value is 3.10.""", + input_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the input file.""" ) - requirements_gcs_uri: Optional[str] = Field( - default=None, - description="""Optional. The Cloud Storage URI of the `requirements.txt` file""", + output_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the output file.""" ) -class ReasoningEngineSpecPackageSpecDict(TypedDict, total=False): - """User-provided package specification, containing pickled object and package requirements.""" - - dependency_files_gcs_uri: Optional[str] - """Optional. 
The Cloud Storage URI of the dependency files in tar.gz format.""" +class _RunQueryJobAgentEngineConfigDict(TypedDict, total=False): + """Config for running a query job on an agent engine.""" - pickle_object_gcs_uri: Optional[str] - """Optional. The Cloud Storage URI of the pickled python object.""" + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" - python_version: Optional[str] - """Optional. The Python version. Supported values are 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, the default value is 3.10.""" + input_gcs_uri: Optional[str] + """The GCS URI of the input file.""" - requirements_gcs_uri: Optional[str] - """Optional. The Cloud Storage URI of the `requirements.txt` file""" + output_gcs_uri: Optional[str] + """The GCS URI of the output file.""" -ReasoningEngineSpecPackageSpecOrDict = Union[ - ReasoningEngineSpecPackageSpec, ReasoningEngineSpecPackageSpecDict +_RunQueryJobAgentEngineConfigOrDict = Union[ + _RunQueryJobAgentEngineConfig, _RunQueryJobAgentEngineConfigDict ] -class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig(_common.BaseModel): - """Configuration for the Agent Development Kit (ADK).""" +class _RunQueryJobAgentEngineRequestParameters(_common.BaseModel): + """Parameters for running a query job on an agent engine.""" - json_config: Optional[dict[str, Any]] = Field( - default=None, - description="""Required. 
The value of the ADK config in JSON format.""", + name: Optional[str] = Field( + default=None, description="""Name of the agent engine.""" + ) + config: Optional[_RunQueryJobAgentEngineConfig] = Field( + default=None, description="""""" ) -class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict( - TypedDict, total=False -): - """Configuration for the Agent Development Kit (ADK).""" +class _RunQueryJobAgentEngineRequestParametersDict(TypedDict, total=False): + """Parameters for running a query job on an agent engine.""" - json_config: Optional[dict[str, Any]] - """Required. The value of the ADK config in JSON format.""" + name: Optional[str] + """Name of the agent engine.""" + + config: Optional[_RunQueryJobAgentEngineConfigDict] + """""" -ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig, - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict, +_RunQueryJobAgentEngineRequestParametersOrDict = Union[ + _RunQueryJobAgentEngineRequestParameters, + _RunQueryJobAgentEngineRequestParametersDict, ] -class ReasoningEngineSpecSourceCodeSpecInlineSource(_common.BaseModel): - """Specifies source code provided as a byte stream.""" +class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent( + _common.BaseModel +): + """The conversation source event for generating memories.""" - source_archive: Optional[bytes] = Field( - default=None, - description="""Required. Input only. The application source code archive. It must be a compressed tarball (.tar.gz) file.""", + content: Optional[genai_types.Content] = Field( + default=None, description="""Required. 
The content of the event.""" ) -class ReasoningEngineSpecSourceCodeSpecInlineSourceDict(TypedDict, total=False): - """Specifies source code provided as a byte stream.""" +class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict( + TypedDict, total=False +): + """The conversation source event for generating memories.""" - source_archive: Optional[bytes] - """Required. Input only. The application source code archive. It must be a compressed tarball (.tar.gz) file.""" + content: Optional[genai_types.ContentDict] + """Required. The content of the event.""" -ReasoningEngineSpecSourceCodeSpecInlineSourceOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecInlineSource, - ReasoningEngineSpecSourceCodeSpecInlineSourceDict, -] +MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventOrDict = ( + Union[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent, + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict, + ] +) -class ReasoningEngineSpecSourceCodeSpecAgentConfigSource(_common.BaseModel): - """Specification for the deploying from agent config.""" +class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource( + _common.BaseModel +): + """A conversation source for the example. This is similar to `DirectContentsSource`.""" - adk_config: Optional[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig - ] = Field(default=None, description="""Required. The ADK configuration.""") - inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSource] = Field( + events: Optional[ + list[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent + ] + ] = Field( default=None, - description="""Optional. Any additional files needed to interpret the config. If a `requirements.txt` file is present in the `inline_source`, the corresponding packages will be installed. 
If no `requirements.txt` file is present in `inline_source`, then the latest version of `google-adk` will be installed for interpreting the ADK config.""", + description="""Optional. The input conversation events for the example.""", ) -class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict(TypedDict, total=False): - """Specification for the deploying from agent config.""" +class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict( + TypedDict, total=False +): + """A conversation source for the example. This is similar to `DirectContentsSource`.""" - adk_config: Optional[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict + events: Optional[ + list[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict + ] ] - """Required. The ADK configuration.""" - - inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSourceDict] - """Optional. Any additional files needed to interpret the config. If a `requirements.txt` file is present in the `inline_source`, the corresponding packages will be installed. If no `requirements.txt` file is present in `inline_source`, then the latest version of `google-adk` will be installed for interpreting the ADK config.""" + """Optional. The input conversation events for the example.""" -ReasoningEngineSpecSourceCodeSpecAgentConfigSourceOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSource, - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict, +MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceOrDict = Union[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource, + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict, ] -class ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig(_common.BaseModel): - """Specifies the configuration for fetching source code from a Git repository that is managed by Developer Connect. - - This includes the repository, revision, and directory to use. 
- """ +class MemoryTopicId(_common.BaseModel): + """The topic ID for a memory.""" - git_repository_link: Optional[str] = Field( - default=None, - description="""Required. The Developer Connect Git repository link, formatted as `projects/{project_id}/locations/{location_id}/connections/{connection_id}/gitRepositoryLink/{repository_link_id}`.""", - ) - dir: Optional[str] = Field( - default=None, - description="""Required. Directory, relative to the source root, in which to run the build.""", + custom_memory_topic_label: Optional[str] = Field( + default=None, description="""Optional. The custom memory topic label.""" ) - revision: Optional[str] = Field( - default=None, - description="""Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref.""", + managed_memory_topic: Optional[ManagedTopicEnum] = Field( + default=None, description="""Optional. The managed memory topic.""" ) -class ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict( - TypedDict, total=False -): - """Specifies the configuration for fetching source code from a Git repository that is managed by Developer Connect. - - This includes the repository, revision, and directory to use. - """ - - git_repository_link: Optional[str] - """Required. The Developer Connect Git repository link, formatted as `projects/{project_id}/locations/{location_id}/connections/{connection_id}/gitRepositoryLink/{repository_link_id}`.""" +class MemoryTopicIdDict(TypedDict, total=False): + """The topic ID for a memory.""" - dir: Optional[str] - """Required. Directory, relative to the source root, in which to run the build.""" + custom_memory_topic_label: Optional[str] + """Optional. The custom memory topic label.""" - revision: Optional[str] - """Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref.""" + managed_memory_topic: Optional[ManagedTopicEnum] + """Optional. 
The managed memory topic.""" -ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig, - ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict, -] +MemoryTopicIdOrDict = Union[MemoryTopicId, MemoryTopicIdDict] -class ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource(_common.BaseModel): - """Specifies source code to be fetched from a Git repository managed through the Developer Connect service.""" +class MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory( + _common.BaseModel +): + """A memory generated by the operation.""" - config: Optional[ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig] = Field( + fact: Optional[str] = Field( + default=None, description="""Required. The fact to generate a memory from.""" + ) + topics: Optional[list[MemoryTopicId]] = Field( default=None, - description="""Required. The Developer Connect configuration that defines the specific repository, revision, and directory to use as the source code root.""", + description="""Optional. The list of topics that the memory should be associated with. For example, use `custom_memory_topic_label = "jargon"` if the extracted memory is an example of memory extraction for the custom topic `jargon`.""", ) -class ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict( +class MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict( TypedDict, total=False ): - """Specifies source code to be fetched from a Git repository managed through the Developer Connect service.""" + """A memory generated by the operation.""" - config: Optional[ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict] - """Required. The Developer Connect configuration that defines the specific repository, revision, and directory to use as the source code root.""" + fact: Optional[str] + """Required. The fact to generate a memory from.""" + topics: Optional[list[MemoryTopicIdDict]] + """Optional. 
The list of topics that the memory should be associated with. For example, use `custom_memory_topic_label = "jargon"` if the extracted memory is an example of memory extraction for the custom topic `jargon`.""" -ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource, - ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict, -] +MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryOrDict = Union[ + MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory, + MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict, +] -class ReasoningEngineSpecSourceCodeSpecImageSpec(_common.BaseModel): - """The image spec for building an image (within a single build step). - It is based on the config file (i.e. Dockerfile) in the source directory. - """ +class MemoryBankCustomizationConfigGenerateMemoriesExample(_common.BaseModel): + """An example of how to generate memories for a particular scope.""" - build_args: Optional[dict[str, str]] = Field( + conversation_source: Optional[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource + ] = Field(default=None, description="""A conversation source for the example.""") + generated_memories: Optional[ + list[MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory] + ] = Field( default=None, - description="""Optional. Build arguments to be used. They will be passed through --build-arg flags.""", + description="""Optional. The memories that are expected to be generated from the input conversation. An empty list indicates that no memories are expected to be generated for the input conversation.""", ) -class ReasoningEngineSpecSourceCodeSpecImageSpecDict(TypedDict, total=False): - """The image spec for building an image (within a single build step). 
+class MemoryBankCustomizationConfigGenerateMemoriesExampleDict(TypedDict, total=False): + """An example of how to generate memories for a particular scope.""" - It is based on the config file (i.e. Dockerfile) in the source directory. - """ + conversation_source: Optional[ + MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict + ] + """A conversation source for the example.""" - build_args: Optional[dict[str, str]] - """Optional. Build arguments to be used. They will be passed through --build-arg flags.""" + generated_memories: Optional[ + list[MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict] + ] + """Optional. The memories that are expected to be generated from the input conversation. An empty list indicates that no memories are expected to be generated for the input conversation.""" -ReasoningEngineSpecSourceCodeSpecImageSpecOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecImageSpec, - ReasoningEngineSpecSourceCodeSpecImageSpecDict, +MemoryBankCustomizationConfigGenerateMemoriesExampleOrDict = Union[ + MemoryBankCustomizationConfigGenerateMemoriesExample, + MemoryBankCustomizationConfigGenerateMemoriesExampleDict, ] -class ReasoningEngineSpecSourceCodeSpecPythonSpec(_common.BaseModel): - """Specification for running a Python application from source.""" +class MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic(_common.BaseModel): + """A custom memory topic defined by the developer.""" - entrypoint_module: Optional[str] = Field( - default=None, - description="""Optional. The Python module to load as the entrypoint, specified as a fully qualified module name. For example: path.to.agent. If not specified, defaults to "agent". The project root will be added to Python sys.path, allowing imports to be specified relative to the root. This field should not be set if the source is `agent_config_source`.""", - ) - entrypoint_object: Optional[str] = Field( - default=None, - description="""Optional. 
The name of the callable object within the `entrypoint_module` to use as the application If not specified, defaults to "root_agent". This field should not be set if the source is `agent_config_source`.""", - ) - requirements_file: Optional[str] = Field( - default=None, - description="""Optional. The path to the requirements file, relative to the source root. If not specified, defaults to "requirements.txt".""", + label: Optional[str] = Field( + default=None, description="""Required. The label of the topic.""" ) - version: Optional[str] = Field( + description: Optional[str] = Field( default=None, - description="""Optional. The version of Python to use. Support version includes 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, default value is 3.10.""", + description="""Required. Description of the memory topic. This should explain what information should be extracted for this topic.""", ) -class ReasoningEngineSpecSourceCodeSpecPythonSpecDict(TypedDict, total=False): - """Specification for running a Python application from source.""" +class MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict( + TypedDict, total=False +): + """A custom memory topic defined by the developer.""" - entrypoint_module: Optional[str] - """Optional. The Python module to load as the entrypoint, specified as a fully qualified module name. For example: path.to.agent. If not specified, defaults to "agent". The project root will be added to Python sys.path, allowing imports to be specified relative to the root. This field should not be set if the source is `agent_config_source`.""" + label: Optional[str] + """Required. The label of the topic.""" - entrypoint_object: Optional[str] - """Optional. The name of the callable object within the `entrypoint_module` to use as the application If not specified, defaults to "root_agent". This field should not be set if the source is `agent_config_source`.""" + description: Optional[str] + """Required. Description of the memory topic. 
This should explain what information should be extracted for this topic.""" - requirements_file: Optional[str] - """Optional. The path to the requirements file, relative to the source root. If not specified, defaults to "requirements.txt".""" - version: Optional[str] - """Optional. The version of Python to use. Support version includes 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, default value is 3.10.""" +MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicOrDict = Union[ + MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic, + MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict, +] -ReasoningEngineSpecSourceCodeSpecPythonSpecOrDict = Union[ - ReasoningEngineSpecSourceCodeSpecPythonSpec, - ReasoningEngineSpecSourceCodeSpecPythonSpecDict, +class MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic(_common.BaseModel): + """A managed memory topic defined by the system.""" + + managed_topic_enum: Optional[ManagedTopicEnum] = Field( + default=None, description="""Required. The managed topic.""" + ) + + +class MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict( + TypedDict, total=False +): + """A managed memory topic defined by the system.""" + + managed_topic_enum: Optional[ManagedTopicEnum] + """Required. 
The managed topic.""" + + +MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicOrDict = Union[ + MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic, + MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict, ] -class ReasoningEngineSpecSourceCodeSpec(_common.BaseModel): - """Specification for deploying from source code.""" +class MemoryBankCustomizationConfigMemoryTopic(_common.BaseModel): + """A topic of information that should be extracted from conversations and stored as memories.""" - agent_config_source: Optional[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSource + custom_memory_topic: Optional[ + MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic ] = Field( - default=None, description="""Source code is generated from the agent config.""" + default=None, description="""A custom memory topic defined by the developer.""" ) - developer_connect_source: Optional[ - ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource + managed_memory_topic: Optional[ + MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic ] = Field( - default=None, - description="""Source code is in a Git repository managed by Developer Connect.""", - ) - image_spec: Optional[ReasoningEngineSpecSourceCodeSpecImageSpec] = Field( - default=None, - description="""Optional. 
Configuration for building an image with custom config file.""", - ) - inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSource] = Field( - default=None, description="""Source code is provided directly in the request.""" - ) - python_spec: Optional[ReasoningEngineSpecSourceCodeSpecPythonSpec] = Field( - default=None, description="""Configuration for a Python application.""" + default=None, description="""A managed memory topic defined by Memory Bank.""" ) -class ReasoningEngineSpecSourceCodeSpecDict(TypedDict, total=False): - """Specification for deploying from source code.""" +class MemoryBankCustomizationConfigMemoryTopicDict(TypedDict, total=False): + """A topic of information that should be extracted from conversations and stored as memories.""" - agent_config_source: Optional[ - ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict + custom_memory_topic: Optional[ + MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict ] - """Source code is generated from the agent config.""" + """A custom memory topic defined by the developer.""" - developer_connect_source: Optional[ - ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict + managed_memory_topic: Optional[ + MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict ] - """Source code is in a Git repository managed by Developer Connect.""" - - image_spec: Optional[ReasoningEngineSpecSourceCodeSpecImageSpecDict] - """Optional. 
Configuration for building an image with custom config file.""" - - inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSourceDict] - """Source code is provided directly in the request.""" - - python_spec: Optional[ReasoningEngineSpecSourceCodeSpecPythonSpecDict] - """Configuration for a Python application.""" + """A managed memory topic defined by Memory Bank.""" -ReasoningEngineSpecSourceCodeSpecOrDict = Union[ - ReasoningEngineSpecSourceCodeSpec, ReasoningEngineSpecSourceCodeSpecDict +MemoryBankCustomizationConfigMemoryTopicOrDict = Union[ + MemoryBankCustomizationConfigMemoryTopic, + MemoryBankCustomizationConfigMemoryTopicDict, ] -class ReasoningEngineSpec(_common.BaseModel): - """The specification of an agent engine.""" +class MemoryBankCustomizationConfig(_common.BaseModel): + """Configuration for organizing memories for a particular scope.""" - agent_card: Optional[dict[str, Any]] = Field( - default=None, - description="""Optional. The A2A Agent Card for the agent (if available). It follows the specification at https://a2a-protocol.org/latest/specification/#5-agent-discovery-the-agent-card.""", - ) - agent_framework: Optional[str] = Field( + enable_third_person_memories: Optional[bool] = Field( default=None, - description="""Optional. The OSS agent framework used to develop the agent. Currently supported values: "google-adk", "langchain", "langgraph", "ag2", "llama-index", "custom".""", + description="""Optional. If true, then the memories will be generated in the third person (i.e. "The user generates memories with Memory Bank."). By default, the memories will be generated in the first person (i.e. "I generate memories with Memory Bank.")""", ) - class_methods: Optional[list[dict[str, Any]]] = Field( + generate_memories_examples: Optional[ + list[MemoryBankCustomizationConfigGenerateMemoriesExample] + ] = Field( default=None, - description="""Optional. 
Declarations for object class methods in OpenAPI specification format.""", + description="""Optional. Examples of how to generate memories for a particular scope.""", ) - deployment_spec: Optional[ReasoningEngineSpecDeploymentSpec] = Field( + memory_topics: Optional[list[MemoryBankCustomizationConfigMemoryTopic]] = Field( default=None, - description="""Optional. The specification of a Reasoning Engine deployment.""", + description="""Optional. Topics of information that should be extracted from conversations and stored as memories. If not set, then Memory Bank's default topics will be used.""", ) - effective_identity: Optional[str] = Field( + scope_keys: Optional[list[str]] = Field( default=None, - description="""Output only. The identity to use for the Reasoning Engine. It can contain one of the following values: * service-{project}@gcp-sa-aiplatform-re.googleapis.com (for SERVICE_AGENT identity type) * {name}@{project}.gserviceaccount.com (for SERVICE_ACCOUNT identity type) * agents.global.{org}.system.id.goog/resources/aiplatform/projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine} (for AGENT_IDENTITY identity type)""", + description="""Optional. The scope keys (i.e. 'user_id') for which to use this config. A request's scope must include all of the provided keys for the config to be used (order does not matter). If empty, then the config will be used for all requests that do not have a more specific config. Only one default config is allowed per Memory Bank.""", ) - identity_type: Optional[IdentityType] = Field( - default=None, - description="""Optional. The identity type to use for the Reasoning Engine. If not specified, the `service_account` field will be used if set, otherwise the default Vertex AI Reasoning Engine Service Agent in the project will be used.""", - ) - package_spec: Optional[ReasoningEngineSpecPackageSpec] = Field( - default=None, - description="""Optional. User provided package spec of the ReasoningEngine. 
Ignored when users directly specify a deployment image through `deployment_spec.first_party_image_override`, but keeping the field_behavior to avoid introducing breaking changes. The `deployment_source` field should not be set if `package_spec` is specified.""", - ) - service_account: Optional[str] = Field( - default=None, - description="""Optional. The service account that the Reasoning Engine artifact runs as. It should have "roles/storage.objectViewer" for reading the user project's Cloud Storage and "roles/aiplatform.user" for using Vertex extensions. If not specified, the Vertex AI Reasoning Engine Service Agent in the project will be used.""", - ) - source_code_spec: Optional[ReasoningEngineSpecSourceCodeSpec] = Field( - default=None, - description="""Deploy from source code files with a defined entrypoint.""", - ) - - -class ReasoningEngineSpecDict(TypedDict, total=False): - """The specification of an agent engine.""" - - agent_card: Optional[dict[str, Any]] - """Optional. The A2A Agent Card for the agent (if available). It follows the specification at https://a2a-protocol.org/latest/specification/#5-agent-discovery-the-agent-card.""" - - agent_framework: Optional[str] - """Optional. The OSS agent framework used to develop the agent. Currently supported values: "google-adk", "langchain", "langgraph", "ag2", "llama-index", "custom".""" - class_methods: Optional[list[dict[str, Any]]] - """Optional. Declarations for object class methods in OpenAPI specification format.""" - - deployment_spec: Optional[ReasoningEngineSpecDeploymentSpecDict] - """Optional. The specification of a Reasoning Engine deployment.""" - effective_identity: Optional[str] - """Output only. The identity to use for the Reasoning Engine. 
It can contain one of the following values: * service-{project}@gcp-sa-aiplatform-re.googleapis.com (for SERVICE_AGENT identity type) * {name}@{project}.gserviceaccount.com (for SERVICE_ACCOUNT identity type) * agents.global.{org}.system.id.goog/resources/aiplatform/projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine} (for AGENT_IDENTITY identity type)""" +class MemoryBankCustomizationConfigDict(TypedDict, total=False): + """Configuration for organizing memories for a particular scope.""" - identity_type: Optional[IdentityType] - """Optional. The identity type to use for the Reasoning Engine. If not specified, the `service_account` field will be used if set, otherwise the default Vertex AI Reasoning Engine Service Agent in the project will be used.""" + enable_third_person_memories: Optional[bool] + """Optional. If true, then the memories will be generated in the third person (i.e. "The user generates memories with Memory Bank."). By default, the memories will be generated in the first person (i.e. "I generate memories with Memory Bank.")""" - package_spec: Optional[ReasoningEngineSpecPackageSpecDict] - """Optional. User provided package spec of the ReasoningEngine. Ignored when users directly specify a deployment image through `deployment_spec.first_party_image_override`, but keeping the field_behavior to avoid introducing breaking changes. The `deployment_source` field should not be set if `package_spec` is specified.""" + generate_memories_examples: Optional[ + list[MemoryBankCustomizationConfigGenerateMemoriesExampleDict] + ] + """Optional. Examples of how to generate memories for a particular scope.""" - service_account: Optional[str] - """Optional. The service account that the Reasoning Engine artifact runs as. It should have "roles/storage.objectViewer" for reading the user project's Cloud Storage and "roles/aiplatform.user" for using Vertex extensions. 
If not specified, the Vertex AI Reasoning Engine Service Agent in the project will be used.""" + memory_topics: Optional[list[MemoryBankCustomizationConfigMemoryTopicDict]] + """Optional. Topics of information that should be extracted from conversations and stored as memories. If not set, then Memory Bank's default topics will be used.""" - source_code_spec: Optional[ReasoningEngineSpecSourceCodeSpecDict] - """Deploy from source code files with a defined entrypoint.""" + scope_keys: Optional[list[str]] + """Optional. The scope keys (i.e. 'user_id') for which to use this config. A request's scope must include all of the provided keys for the config to be used (order does not matter). If empty, then the config will be used for all requests that do not have a more specific config. Only one default config is allowed per Memory Bank.""" -ReasoningEngineSpecOrDict = Union[ReasoningEngineSpec, ReasoningEngineSpecDict] +MemoryBankCustomizationConfigOrDict = Union[ + MemoryBankCustomizationConfig, MemoryBankCustomizationConfigDict +] -class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent( - _common.BaseModel -): - """The conversation source event for generating memories.""" +class ReasoningEngineContextSpecMemoryBankConfigGenerationConfig(_common.BaseModel): + """Configuration for how to generate memories.""" - content: Optional[genai_types.Content] = Field( - default=None, description="""Required. The content of the event.""" + model: Optional[str] = Field( + default=None, + description="""Required. The model used to generate memories. 
Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""", ) -class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict( +class ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict( TypedDict, total=False ): - """The conversation source event for generating memories.""" + """Configuration for how to generate memories.""" - content: Optional[genai_types.ContentDict] - """Required. The content of the event.""" + model: Optional[str] + """Required. The model used to generate memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""" -MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventOrDict = ( - Union[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent, - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict, - ] -) +ReasoningEngineContextSpecMemoryBankConfigGenerationConfigOrDict = Union[ + ReasoningEngineContextSpecMemoryBankConfigGenerationConfig, + ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict, +] -class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource( +class ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig( _common.BaseModel ): - """A conversation source for the example. This is similar to `DirectContentsSource`.""" + """Configuration for how to perform similarity search on memories.""" - events: Optional[ - list[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEvent - ] - ] = Field( + embedding_model: Optional[str] = Field( default=None, - description="""Optional. The input conversation events for the example.""", + description="""Required. The model used to generate embeddings to lookup similar memories. 
Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""", ) -class MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict( +class ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict( TypedDict, total=False ): - """A conversation source for the example. This is similar to `DirectContentsSource`.""" + """Configuration for how to perform similarity search on memories.""" - events: Optional[ - list[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceEventDict - ] - ] - """Optional. The input conversation events for the example.""" + embedding_model: Optional[str] + """Required. The model used to generate embeddings to lookup similar memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""" -MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceOrDict = Union[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource, - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict, +ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigOrDict = Union[ + ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig, + ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict, ] -class MemoryTopicId(_common.BaseModel): - """The topic ID for a memory.""" - - custom_memory_topic_label: Optional[str] = Field( - default=None, description="""Optional. The custom memory topic label.""" - ) - managed_memory_topic: Optional[ManagedTopicEnum] = Field( - default=None, description="""Optional. The managed memory topic.""" - ) - - -class MemoryTopicIdDict(TypedDict, total=False): - """The topic ID for a memory.""" - - custom_memory_topic_label: Optional[str] - """Optional. The custom memory topic label.""" - - managed_memory_topic: Optional[ManagedTopicEnum] - """Optional. 
The managed memory topic.""" - - -MemoryTopicIdOrDict = Union[MemoryTopicId, MemoryTopicIdDict] - - -class MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory( +class ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig( _common.BaseModel ): - """A memory generated by the operation.""" + """Configuration for TTL of the memories in the Memory Bank based on the action that created or updated the memory.""" - fact: Optional[str] = Field( - default=None, description="""Required. The fact to generate a memory from.""" + create_ttl: Optional[str] = Field( + default=None, + description="""Optional. The TTL duration for memories uploaded via CreateMemory.""", ) - topics: Optional[list[MemoryTopicId]] = Field( + generate_created_ttl: Optional[str] = Field( default=None, - description="""Optional. The list of topics that the memory should be associated with. For example, use `custom_memory_topic_label = "jargon"` if the extracted memory is an example of memory extraction for the custom topic `jargon`.""", + description="""Optional. The TTL duration for memories newly generated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.CREATED).""", + ) + generate_updated_ttl: Optional[str] = Field( + default=None, + description="""Optional. The TTL duration for memories updated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.UPDATED). In the case of an UPDATE action, the `expire_time` of the existing memory will be updated to the new value (now + TTL).""", ) -class MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict( +class ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict( TypedDict, total=False ): - """A memory generated by the operation.""" + """Configuration for TTL of the memories in the Memory Bank based on the action that created or updated the memory.""" - fact: Optional[str] - """Required. 
The fact to generate a memory from.""" + create_ttl: Optional[str] + """Optional. The TTL duration for memories uploaded via CreateMemory.""" - topics: Optional[list[MemoryTopicIdDict]] - """Optional. The list of topics that the memory should be associated with. For example, use `custom_memory_topic_label = "jargon"` if the extracted memory is an example of memory extraction for the custom topic `jargon`.""" + generate_created_ttl: Optional[str] + """Optional. The TTL duration for memories newly generated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.CREATED).""" + + generate_updated_ttl: Optional[str] + """Optional. The TTL duration for memories updated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.UPDATED). In the case of an UPDATE action, the `expire_time` of the existing memory will be updated to the new value (now + TTL).""" -MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryOrDict = Union[ - MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory, - MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict, +ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigOrDict = Union[ + ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig, + ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict, ] -class MemoryBankCustomizationConfigGenerateMemoriesExample(_common.BaseModel): - """An example of how to generate memories for a particular scope.""" +class ReasoningEngineContextSpecMemoryBankConfigTtlConfig(_common.BaseModel): + """Configuration for automatically setting the TTL ("time-to-live") of the memories in the Memory Bank.""" - conversation_source: Optional[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSource - ] = Field(default=None, description="""A conversation source for the example.""") - generated_memories: Optional[ - list[MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemory] + 
default_ttl: Optional[str] = Field( + default=None, + description="""Optional. The default TTL duration of the memories in the Memory Bank. This applies to all operations that create or update a memory.""", + ) + granular_ttl_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig ] = Field( default=None, - description="""Optional. The memories that are expected to be generated from the input conversation. An empty list indicates that no memories are expected to be generated for the input conversation.""", + description="""Optional. The granular TTL configuration of the memories in the Memory Bank.""", + ) + memory_revision_default_ttl: Optional[str] = Field( + default=None, + description="""Optional. The default TTL duration of the memory revisions in the Memory Bank. This applies to all operations that create a memory revision. If not set, a default TTL of 365 days will be used.""", ) -class MemoryBankCustomizationConfigGenerateMemoriesExampleDict(TypedDict, total=False): - """An example of how to generate memories for a particular scope.""" +class ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict(TypedDict, total=False): + """Configuration for automatically setting the TTL ("time-to-live") of the memories in the Memory Bank.""" - conversation_source: Optional[ - MemoryBankCustomizationConfigGenerateMemoriesExampleConversationSourceDict - ] - """A conversation source for the example.""" + default_ttl: Optional[str] + """Optional. The default TTL duration of the memories in the Memory Bank. This applies to all operations that create or update a memory.""" - generated_memories: Optional[ - list[MemoryBankCustomizationConfigGenerateMemoriesExampleGeneratedMemoryDict] + granular_ttl_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict ] - """Optional. The memories that are expected to be generated from the input conversation. 
An empty list indicates that no memories are expected to be generated for the input conversation.""" + """Optional. The granular TTL configuration of the memories in the Memory Bank.""" + memory_revision_default_ttl: Optional[str] + """Optional. The default TTL duration of the memory revisions in the Memory Bank. This applies to all operations that create a memory revision. If not set, a default TTL of 365 days will be used.""" -MemoryBankCustomizationConfigGenerateMemoriesExampleOrDict = Union[ - MemoryBankCustomizationConfigGenerateMemoriesExample, - MemoryBankCustomizationConfigGenerateMemoriesExampleDict, + +ReasoningEngineContextSpecMemoryBankConfigTtlConfigOrDict = Union[ + ReasoningEngineContextSpecMemoryBankConfigTtlConfig, + ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict, ] -class MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic(_common.BaseModel): - """A custom memory topic defined by the developer.""" +class ReasoningEngineContextSpecMemoryBankConfig(_common.BaseModel): + """Specification for a Memory Bank.""" - label: Optional[str] = Field( - default=None, description="""Required. The label of the topic.""" + customization_configs: Optional[list[MemoryBankCustomizationConfig]] = Field( + default=None, + description="""Optional. Configuration for how to customize Memory Bank behavior for a particular scope.""", ) - description: Optional[str] = Field( + disable_memory_revisions: Optional[bool] = Field( default=None, - description="""Required. Description of the memory topic. This should explain what information should be extracted for this topic.""", + description="""If true, no memory revisions will be created for any requests to the Memory Bank.""", + ) + generation_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigGenerationConfig + ] = Field( + default=None, + description="""Optional. 
Configuration for how to generate memories for the Memory Bank.""", + ) + similarity_search_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig + ] = Field( + default=None, + description="""Optional. Configuration for how to perform similarity search on memories. If not set, the Memory Bank will use the default embedding model `text-embedding-005`.""", + ) + ttl_config: Optional[ReasoningEngineContextSpecMemoryBankConfigTtlConfig] = Field( + default=None, + description="""Optional. Configuration for automatic TTL ("time-to-live") of the memories in the Memory Bank. If not set, TTL will not be applied automatically. The TTL can be explicitly set by modifying the `expire_time` of each Memory resource.""", ) -class MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict( - TypedDict, total=False -): - """A custom memory topic defined by the developer.""" - - label: Optional[str] - """Required. The label of the topic.""" +class ReasoningEngineContextSpecMemoryBankConfigDict(TypedDict, total=False): + """Specification for a Memory Bank.""" - description: Optional[str] - """Required. Description of the memory topic. This should explain what information should be extracted for this topic.""" + customization_configs: Optional[list[MemoryBankCustomizationConfigDict]] + """Optional. Configuration for how to customize Memory Bank behavior for a particular scope.""" + disable_memory_revisions: Optional[bool] + """If true, no memory revisions will be created for any requests to the Memory Bank.""" -MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicOrDict = Union[ - MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic, - MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict, + generation_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict + ] + """Optional. 
Configuration for how to generate memories for the Memory Bank.""" + + similarity_search_config: Optional[ + ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict + ] + """Optional. Configuration for how to perform similarity search on memories. If not set, the Memory Bank will use the default embedding model `text-embedding-005`.""" + + ttl_config: Optional[ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict] + """Optional. Configuration for automatic TTL ("time-to-live") of the memories in the Memory Bank. If not set, TTL will not be applied automatically. The TTL can be explicitly set by modifying the `expire_time` of each Memory resource.""" + + +ReasoningEngineContextSpecMemoryBankConfigOrDict = Union[ + ReasoningEngineContextSpecMemoryBankConfig, + ReasoningEngineContextSpecMemoryBankConfigDict, ] -class MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic(_common.BaseModel): - """A managed memory topic defined by the system.""" +class ReasoningEngineContextSpec(_common.BaseModel): + """The configuration for agent engine sub-resources to manage context.""" - managed_topic_enum: Optional[ManagedTopicEnum] = Field( - default=None, description="""Required. The managed topic.""" + memory_bank_config: Optional[ReasoningEngineContextSpecMemoryBankConfig] = Field( + default=None, + description="""Optional. Specification for a Memory Bank, which manages memories for the Agent Engine.""", ) -class MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict( - TypedDict, total=False -): - """A managed memory topic defined by the system.""" +class ReasoningEngineContextSpecDict(TypedDict, total=False): + """The configuration for agent engine sub-resources to manage context.""" - managed_topic_enum: Optional[ManagedTopicEnum] - """Required. The managed topic.""" + memory_bank_config: Optional[ReasoningEngineContextSpecMemoryBankConfigDict] + """Optional. 
Specification for a Memory Bank, which manages memories for the Agent Engine.""" -MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicOrDict = Union[ - MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic, - MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict, +ReasoningEngineContextSpecOrDict = Union[ + ReasoningEngineContextSpec, ReasoningEngineContextSpecDict ] -class MemoryBankCustomizationConfigMemoryTopic(_common.BaseModel): - """A topic of information that should be extracted from conversations and stored as memories.""" +class SecretRef(_common.BaseModel): + """Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" - custom_memory_topic: Optional[ - MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopic - ] = Field( - default=None, description="""A custom memory topic defined by the developer.""" + secret: Optional[str] = Field( + default=None, + description="""Required. The name of the secret in Cloud Secret Manager. Format: {secret_name}.""", ) - managed_memory_topic: Optional[ - MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopic - ] = Field( - default=None, description="""A managed memory topic defined by Memory Bank.""" + version: Optional[str] = Field( + default=None, + description="""The Cloud Secret Manager secret version. Can be 'latest' for the latest version, an integer for a specific version, or a version alias.""", ) -class MemoryBankCustomizationConfigMemoryTopicDict(TypedDict, total=False): - """A topic of information that should be extracted from conversations and stored as memories.""" +class SecretRefDict(TypedDict, total=False): + """Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" - custom_memory_topic: Optional[ - MemoryBankCustomizationConfigMemoryTopicCustomMemoryTopicDict - ] - """A custom memory topic defined by the developer.""" + secret: Optional[str] + """Required. 
The name of the secret in Cloud Secret Manager. Format: {secret_name}.""" - managed_memory_topic: Optional[ - MemoryBankCustomizationConfigMemoryTopicManagedMemoryTopicDict - ] - """A managed memory topic defined by Memory Bank.""" + version: Optional[str] + """The Cloud Secret Manager secret version. Can be 'latest' for the latest version, an integer for a specific version, or a version alias.""" -MemoryBankCustomizationConfigMemoryTopicOrDict = Union[ - MemoryBankCustomizationConfigMemoryTopic, - MemoryBankCustomizationConfigMemoryTopicDict, -] +SecretRefOrDict = Union[SecretRef, SecretRefDict] -class MemoryBankCustomizationConfig(_common.BaseModel): - """Configuration for organizing memories for a particular scope.""" +class SecretEnvVar(_common.BaseModel): + """Represents an environment variable where the value is a secret in Cloud Secret Manager.""" - enable_third_person_memories: Optional[bool] = Field( - default=None, - description="""Optional. If true, then the memories will be generated in the third person (i.e. "The user generates memories with Memory Bank."). By default, the memories will be generated in the first person (i.e. "I generate memories with Memory Bank.")""", - ) - generate_memories_examples: Optional[ - list[MemoryBankCustomizationConfigGenerateMemoriesExample] - ] = Field( - default=None, - description="""Optional. Examples of how to generate memories for a particular scope.""", - ) - memory_topics: Optional[list[MemoryBankCustomizationConfigMemoryTopic]] = Field( + name: Optional[str] = Field( default=None, - description="""Optional. Topics of information that should be extracted from conversations and stored as memories. If not set, then Memory Bank's default topics will be used.""", + description="""Required. Name of the secret environment variable.""", ) - scope_keys: Optional[list[str]] = Field( + secret_ref: Optional[SecretRef] = Field( default=None, - description="""Optional. The scope keys (i.e. 
'user_id') for which to use this config. A request's scope must include all of the provided keys for the config to be used (order does not matter). If empty, then the config will be used for all requests that do not have a more specific config. Only one default config is allowed per Memory Bank.""", + description="""Required. Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""", ) -class MemoryBankCustomizationConfigDict(TypedDict, total=False): - """Configuration for organizing memories for a particular scope.""" - - enable_third_person_memories: Optional[bool] - """Optional. If true, then the memories will be generated in the third person (i.e. "The user generates memories with Memory Bank."). By default, the memories will be generated in the first person (i.e. "I generate memories with Memory Bank.")""" - - generate_memories_examples: Optional[ - list[MemoryBankCustomizationConfigGenerateMemoriesExampleDict] - ] - """Optional. Examples of how to generate memories for a particular scope.""" +class SecretEnvVarDict(TypedDict, total=False): + """Represents an environment variable where the value is a secret in Cloud Secret Manager.""" - memory_topics: Optional[list[MemoryBankCustomizationConfigMemoryTopicDict]] - """Optional. Topics of information that should be extracted from conversations and stored as memories. If not set, then Memory Bank's default topics will be used.""" + name: Optional[str] + """Required. Name of the secret environment variable.""" - scope_keys: Optional[list[str]] - """Optional. The scope keys (i.e. 'user_id') for which to use this config. A request's scope must include all of the provided keys for the config to be used (order does not matter). If empty, then the config will be used for all requests that do not have a more specific config. Only one default config is allowed per Memory Bank.""" + secret_ref: Optional[SecretRefDict] + """Required. 
Reference to a secret stored in the Cloud Secret Manager that will provide the value for this environment variable.""" -MemoryBankCustomizationConfigOrDict = Union[ - MemoryBankCustomizationConfig, MemoryBankCustomizationConfigDict -] +SecretEnvVarOrDict = Union[SecretEnvVar, SecretEnvVarDict] -class ReasoningEngineContextSpecMemoryBankConfigGenerationConfig(_common.BaseModel): - """Configuration for how to generate memories.""" +class ReasoningEngineSpecDeploymentSpec(_common.BaseModel): + """The specification of a Reasoning Engine deployment.""" - model: Optional[str] = Field( + agent_server_mode: Optional[AgentServerMode] = Field( + default=None, description="""The agent server mode.""" + ) + container_concurrency: Optional[int] = Field( default=None, - description="""Required. The model used to generate memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""", + description="""Optional. Concurrency for each container and agent server. Recommended value: 2 * cpu + 1. Defaults to 9.""", + ) + env: Optional[list[EnvVar]] = Field( + default=None, + description="""Optional. Environment variables to be set with the Reasoning Engine deployment. The environment variables can be updated through the UpdateReasoningEngine API.""", + ) + max_instances: Optional[int] = Field( + default=None, + description="""Optional. The maximum number of application instances that can be launched to handle increased traffic. Defaults to 100. Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable range is [1, 100].""", + ) + min_instances: Optional[int] = Field( + default=None, + description="""Optional. The minimum number of application instances that will be kept running at all times. Defaults to 1. Range: [0, 10].""", + ) + psc_interface_config: Optional[PscInterfaceConfig] = Field( + default=None, description="""Optional. 
Configuration for PSC-I.""" + ) + resource_limits: Optional[dict[str, str]] = Field( + default=None, + description="""Optional. Resource limits for each container. Only 'cpu' and 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. * The only supported values for CPU are '1', '2', '4', '6' and '8'. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * The only supported values for memory are '1Gi', '2Gi', ... '32 Gi'. * For required cpu on different memory values, go to https://cloud.google.com/run/docs/configuring/memory-limits""", + ) + secret_env: Optional[list[SecretEnvVar]] = Field( + default=None, + description="""Optional. Environment variables where the value is a secret in Cloud Secret Manager. To use this feature, add 'Secret Manager Secret Accessor' role (roles/secretmanager.secretAccessor) to AI Platform Reasoning Engine Service Agent.""", ) -class ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict( - TypedDict, total=False -): - """Configuration for how to generate memories.""" - - model: Optional[str] - """Required. The model used to generate memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""" +class ReasoningEngineSpecDeploymentSpecDict(TypedDict, total=False): + """The specification of a Reasoning Engine deployment.""" + agent_server_mode: Optional[AgentServerMode] + """The agent server mode.""" -ReasoningEngineContextSpecMemoryBankConfigGenerationConfigOrDict = Union[ - ReasoningEngineContextSpecMemoryBankConfigGenerationConfig, - ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict, -] + container_concurrency: Optional[int] + """Optional. Concurrency for each container and agent server. Recommended value: 2 * cpu + 1. Defaults to 9.""" + env: Optional[list[EnvVarDict]] + """Optional. Environment variables to be set with the Reasoning Engine deployment. 
The environment variables can be updated through the UpdateReasoningEngine API.""" -class ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig( - _common.BaseModel -): - """Configuration for how to perform similarity search on memories.""" + max_instances: Optional[int] + """Optional. The maximum number of application instances that can be launched to handle increased traffic. Defaults to 100. Range: [1, 1000]. If VPC-SC or PSC-I is enabled, the acceptable range is [1, 100].""" - embedding_model: Optional[str] = Field( - default=None, - description="""Required. The model used to generate embeddings to lookup similar memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""", - ) + min_instances: Optional[int] + """Optional. The minimum number of application instances that will be kept running at all times. Defaults to 1. Range: [0, 10].""" + psc_interface_config: Optional[PscInterfaceConfigDict] + """Optional. Configuration for PSC-I.""" -class ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict( - TypedDict, total=False -): - """Configuration for how to perform similarity search on memories.""" + resource_limits: Optional[dict[str, str]] + """Optional. Resource limits for each container. Only 'cpu' and 'memory' keys are supported. Defaults to {"cpu": "4", "memory": "4Gi"}. * The only supported values for CPU are '1', '2', '4', '6' and '8'. For more information, go to https://cloud.google.com/run/docs/configuring/cpu. * The only supported values for memory are '1Gi', '2Gi', ... '32 Gi'. * For required cpu on different memory values, go to https://cloud.google.com/run/docs/configuring/memory-limits""" - embedding_model: Optional[str] - """Required. The model used to generate embeddings to lookup similar memories. Format: `projects/{project}/locations/{location}/publishers/google/models/{model}`.""" + secret_env: Optional[list[SecretEnvVarDict]] + """Optional. 
Environment variables where the value is a secret in Cloud Secret Manager. To use this feature, add 'Secret Manager Secret Accessor' role (roles/secretmanager.secretAccessor) to AI Platform Reasoning Engine Service Agent.""" -ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigOrDict = Union[ - ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig, - ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict, +ReasoningEngineSpecDeploymentSpecOrDict = Union[ + ReasoningEngineSpecDeploymentSpec, ReasoningEngineSpecDeploymentSpecDict ] -class ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig( - _common.BaseModel -): - """Configuration for TTL of the memories in the Memory Bank based on the action that created or updated the memory.""" +class ReasoningEngineSpecPackageSpec(_common.BaseModel): + """User-provided package specification, containing pickled object and package requirements.""" - create_ttl: Optional[str] = Field( + dependency_files_gcs_uri: Optional[str] = Field( default=None, - description="""Optional. The TTL duration for memories uploaded via CreateMemory.""", + description="""Optional. The Cloud Storage URI of the dependency files in tar.gz format.""", ) - generate_created_ttl: Optional[str] = Field( + pickle_object_gcs_uri: Optional[str] = Field( default=None, - description="""Optional. The TTL duration for memories newly generated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.CREATED).""", + description="""Optional. The Cloud Storage URI of the pickled python object.""", ) - generate_updated_ttl: Optional[str] = Field( + python_version: Optional[str] = Field( default=None, - description="""Optional. The TTL duration for memories updated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.UPDATED). In the case of an UPDATE action, the `expire_time` of the existing memory will be updated to the new value (now + TTL).""", + description="""Optional. 
The Python version. Supported values are 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, the default value is 3.10.""", + ) + requirements_gcs_uri: Optional[str] = Field( + default=None, + description="""Optional. The Cloud Storage URI of the `requirements.txt` file""", ) -class ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict( - TypedDict, total=False -): - """Configuration for TTL of the memories in the Memory Bank based on the action that created or updated the memory.""" - - create_ttl: Optional[str] - """Optional. The TTL duration for memories uploaded via CreateMemory.""" +class ReasoningEngineSpecPackageSpecDict(TypedDict, total=False): + """User-provided package specification, containing pickled object and package requirements.""" - generate_created_ttl: Optional[str] - """Optional. The TTL duration for memories newly generated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.CREATED).""" + dependency_files_gcs_uri: Optional[str] + """Optional. The Cloud Storage URI of the dependency files in tar.gz format.""" - generate_updated_ttl: Optional[str] - """Optional. The TTL duration for memories updated via GenerateMemories (GenerateMemoriesResponse.GeneratedMemory.Action.UPDATED). In the case of an UPDATE action, the `expire_time` of the existing memory will be updated to the new value (now + TTL).""" + pickle_object_gcs_uri: Optional[str] + """Optional. The Cloud Storage URI of the pickled python object.""" + python_version: Optional[str] + """Optional. The Python version. Supported values are 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, the default value is 3.10.""" -ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigOrDict = Union[ - ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig, - ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict, + requirements_gcs_uri: Optional[str] + """Optional. 
The Cloud Storage URI of the `requirements.txt` file""" + + +ReasoningEngineSpecPackageSpecOrDict = Union[ + ReasoningEngineSpecPackageSpec, ReasoningEngineSpecPackageSpecDict ] -class ReasoningEngineContextSpecMemoryBankConfigTtlConfig(_common.BaseModel): - """Configuration for automatically setting the TTL ("time-to-live") of the memories in the Memory Bank.""" +class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig(_common.BaseModel): + """Configuration for the Agent Development Kit (ADK).""" - default_ttl: Optional[str] = Field( + json_config: Optional[dict[str, Any]] = Field( default=None, - description="""Optional. The default TTL duration of the memories in the Memory Bank. This applies to all operations that create or update a memory.""", + description="""Required. The value of the ADK config in JSON format.""", ) - granular_ttl_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfig + + +class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict( + TypedDict, total=False +): + """Configuration for the Agent Development Kit (ADK).""" + + json_config: Optional[dict[str, Any]] + """Required. The value of the ADK config in JSON format.""" + + +ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig, + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict, +] + + +class ReasoningEngineSpecSourceCodeSpecInlineSource(_common.BaseModel): + """Specifies source code provided as a byte stream.""" + + source_archive: Optional[bytes] = Field( + default=None, + description="""Required. Input only. The application source code archive. It must be a compressed tarball (.tar.gz) file.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecInlineSourceDict(TypedDict, total=False): + """Specifies source code provided as a byte stream.""" + + source_archive: Optional[bytes] + """Required. Input only. The application source code archive. 
It must be a compressed tarball (.tar.gz) file.""" + + +ReasoningEngineSpecSourceCodeSpecInlineSourceOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecInlineSource, + ReasoningEngineSpecSourceCodeSpecInlineSourceDict, +] + + +class ReasoningEngineSpecSourceCodeSpecAgentConfigSource(_common.BaseModel): + """Specification for the deploying from agent config.""" + + adk_config: Optional[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfig + ] = Field(default=None, description="""Required. The ADK configuration.""") + inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSource] = Field( + default=None, + description="""Optional. Any additional files needed to interpret the config. If a `requirements.txt` file is present in the `inline_source`, the corresponding packages will be installed. If no `requirements.txt` file is present in `inline_source`, then the latest version of `google-adk` will be installed for interpreting the ADK config.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict(TypedDict, total=False): + """Specification for the deploying from agent config.""" + + adk_config: Optional[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceAdkConfigDict + ] + """Required. The ADK configuration.""" + + inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSourceDict] + """Optional. Any additional files needed to interpret the config. If a `requirements.txt` file is present in the `inline_source`, the corresponding packages will be installed. 
If no `requirements.txt` file is present in `inline_source`, then the latest version of `google-adk` will be installed for interpreting the ADK config.""" + + +ReasoningEngineSpecSourceCodeSpecAgentConfigSourceOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSource, + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict, +] + + +class ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig(_common.BaseModel): + """Specifies the configuration for fetching source code from a Git repository that is managed by Developer Connect. + + This includes the repository, revision, and directory to use. + """ + + git_repository_link: Optional[str] = Field( + default=None, + description="""Required. The Developer Connect Git repository link, formatted as `projects/{project_id}/locations/{location_id}/connections/{connection_id}/gitRepositoryLink/{repository_link_id}`.""", + ) + dir: Optional[str] = Field( + default=None, + description="""Required. Directory, relative to the source root, in which to run the build.""", + ) + revision: Optional[str] = Field( + default=None, + description="""Required. The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict( + TypedDict, total=False +): + """Specifies the configuration for fetching source code from a Git repository that is managed by Developer Connect. + + This includes the repository, revision, and directory to use. + """ + + git_repository_link: Optional[str] + """Required. The Developer Connect Git repository link, formatted as `projects/{project_id}/locations/{location_id}/connections/{connection_id}/gitRepositoryLink/{repository_link_id}`.""" + + dir: Optional[str] + """Required. Directory, relative to the source root, in which to run the build.""" + + revision: Optional[str] + """Required. 
The revision to fetch from the Git repository such as a branch, a tag, a commit SHA, or any Git ref.""" + + +ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig, + ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict, +] + + +class ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource(_common.BaseModel): + """Specifies source code to be fetched from a Git repository managed through the Developer Connect service.""" + + config: Optional[ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfig] = Field( + default=None, + description="""Required. The Developer Connect configuration that defines the specific repository, revision, and directory to use as the source code root.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict( + TypedDict, total=False +): + """Specifies source code to be fetched from a Git repository managed through the Developer Connect service.""" + + config: Optional[ReasoningEngineSpecSourceCodeSpecDeveloperConnectConfigDict] + """Required. The Developer Connect configuration that defines the specific repository, revision, and directory to use as the source code root.""" + + +ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource, + ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict, +] + + +class ReasoningEngineSpecSourceCodeSpecImageSpec(_common.BaseModel): + """The image spec for building an image (within a single build step). + + It is based on the config file (i.e. Dockerfile) in the source directory. + """ + + build_args: Optional[dict[str, str]] = Field( + default=None, + description="""Optional. Build arguments to be used. They will be passed through --build-arg flags.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecImageSpecDict(TypedDict, total=False): + """The image spec for building an image (within a single build step). 
+ + It is based on the config file (i.e. Dockerfile) in the source directory. + """ + + build_args: Optional[dict[str, str]] + """Optional. Build arguments to be used. They will be passed through --build-arg flags.""" + + +ReasoningEngineSpecSourceCodeSpecImageSpecOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecImageSpec, + ReasoningEngineSpecSourceCodeSpecImageSpecDict, +] + + +class ReasoningEngineSpecSourceCodeSpecPythonSpec(_common.BaseModel): + """Specification for running a Python application from source.""" + + entrypoint_module: Optional[str] = Field( + default=None, + description="""Optional. The Python module to load as the entrypoint, specified as a fully qualified module name. For example: path.to.agent. If not specified, defaults to "agent". The project root will be added to Python sys.path, allowing imports to be specified relative to the root. This field should not be set if the source is `agent_config_source`.""", + ) + entrypoint_object: Optional[str] = Field( + default=None, + description="""Optional. The name of the callable object within the `entrypoint_module` to use as the application If not specified, defaults to "root_agent". This field should not be set if the source is `agent_config_source`.""", + ) + requirements_file: Optional[str] = Field( + default=None, + description="""Optional. The path to the requirements file, relative to the source root. If not specified, defaults to "requirements.txt".""", + ) + version: Optional[str] = Field( + default=None, + description="""Optional. The version of Python to use. Support version includes 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, default value is 3.10.""", + ) + + +class ReasoningEngineSpecSourceCodeSpecPythonSpecDict(TypedDict, total=False): + """Specification for running a Python application from source.""" + + entrypoint_module: Optional[str] + """Optional. The Python module to load as the entrypoint, specified as a fully qualified module name. For example: path.to.agent. 
If not specified, defaults to "agent". The project root will be added to Python sys.path, allowing imports to be specified relative to the root. This field should not be set if the source is `agent_config_source`.""" + + entrypoint_object: Optional[str] + """Optional. The name of the callable object within the `entrypoint_module` to use as the application If not specified, defaults to "root_agent". This field should not be set if the source is `agent_config_source`.""" + + requirements_file: Optional[str] + """Optional. The path to the requirements file, relative to the source root. If not specified, defaults to "requirements.txt".""" + + version: Optional[str] + """Optional. The version of Python to use. Support version includes 3.9, 3.10, 3.11, 3.12, 3.13, 3.14. If not specified, default value is 3.10.""" + + +ReasoningEngineSpecSourceCodeSpecPythonSpecOrDict = Union[ + ReasoningEngineSpecSourceCodeSpecPythonSpec, + ReasoningEngineSpecSourceCodeSpecPythonSpecDict, +] + + +class ReasoningEngineSpecSourceCodeSpec(_common.BaseModel): + """Specification for deploying from source code.""" + + agent_config_source: Optional[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSource + ] = Field( + default=None, description="""Source code is generated from the agent config.""" + ) + developer_connect_source: Optional[ + ReasoningEngineSpecSourceCodeSpecDeveloperConnectSource ] = Field( default=None, - description="""Optional. The granular TTL configuration of the memories in the Memory Bank.""", + description="""Source code is in a Git repository managed by Developer Connect.""", ) - memory_revision_default_ttl: Optional[str] = Field( + image_spec: Optional[ReasoningEngineSpecSourceCodeSpecImageSpec] = Field( default=None, - description="""Optional. The default TTL duration of the memory revisions in the Memory Bank. This applies to all operations that create a memory revision. If not set, a default TTL of 365 days will be used.""", + description="""Optional. 
Configuration for building an image with custom config file.""", + ) + inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSource] = Field( + default=None, description="""Source code is provided directly in the request.""" + ) + python_spec: Optional[ReasoningEngineSpecSourceCodeSpecPythonSpec] = Field( + default=None, description="""Configuration for a Python application.""" ) -class ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict(TypedDict, total=False): - """Configuration for automatically setting the TTL ("time-to-live") of the memories in the Memory Bank.""" +class ReasoningEngineSpecSourceCodeSpecDict(TypedDict, total=False): + """Specification for deploying from source code.""" - default_ttl: Optional[str] - """Optional. The default TTL duration of the memories in the Memory Bank. This applies to all operations that create or update a memory.""" + agent_config_source: Optional[ + ReasoningEngineSpecSourceCodeSpecAgentConfigSourceDict + ] + """Source code is generated from the agent config.""" - granular_ttl_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigTtlConfigGranularTtlConfigDict + developer_connect_source: Optional[ + ReasoningEngineSpecSourceCodeSpecDeveloperConnectSourceDict ] - """Optional. The granular TTL configuration of the memories in the Memory Bank.""" + """Source code is in a Git repository managed by Developer Connect.""" + + image_spec: Optional[ReasoningEngineSpecSourceCodeSpecImageSpecDict] + """Optional. 
Configuration for building an image with custom config file.""" + + inline_source: Optional[ReasoningEngineSpecSourceCodeSpecInlineSourceDict] + """Source code is provided directly in the request.""" + + python_spec: Optional[ReasoningEngineSpecSourceCodeSpecPythonSpecDict] + """Configuration for a Python application.""" + + +ReasoningEngineSpecSourceCodeSpecOrDict = Union[ + ReasoningEngineSpecSourceCodeSpec, ReasoningEngineSpecSourceCodeSpecDict +] + + +class ReasoningEngineSpec(_common.BaseModel): + """The specification of an agent engine.""" + + agent_card: Optional[dict[str, Any]] = Field( + default=None, + description="""Optional. The A2A Agent Card for the agent (if available). It follows the specification at https://a2a-protocol.org/latest/specification/#5-agent-discovery-the-agent-card.""", + ) + agent_framework: Optional[str] = Field( + default=None, + description="""Optional. The OSS agent framework used to develop the agent. Currently supported values: "google-adk", "langchain", "langgraph", "ag2", "llama-index", "custom".""", + ) + class_methods: Optional[list[dict[str, Any]]] = Field( + default=None, + description="""Optional. Declarations for object class methods in OpenAPI specification format.""", + ) + deployment_spec: Optional[ReasoningEngineSpecDeploymentSpec] = Field( + default=None, + description="""Optional. The specification of a Reasoning Engine deployment.""", + ) + effective_identity: Optional[str] = Field( + default=None, + description="""Output only. The identity to use for the Reasoning Engine. 
It can contain one of the following values: * service-{project}@gcp-sa-aiplatform-re.googleapis.com (for SERVICE_AGENT identity type) * {name}@{project}.gserviceaccount.com (for SERVICE_ACCOUNT identity type) * agents.global.{org}.system.id.goog/resources/aiplatform/projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine} (for AGENT_IDENTITY identity type)""", + ) + identity_type: Optional[IdentityType] = Field( + default=None, + description="""Optional. The identity type to use for the Reasoning Engine. If not specified, the `service_account` field will be used if set, otherwise the default Vertex AI Reasoning Engine Service Agent in the project will be used.""", + ) + package_spec: Optional[ReasoningEngineSpecPackageSpec] = Field( + default=None, + description="""Optional. User provided package spec of the ReasoningEngine. Ignored when users directly specify a deployment image through `deployment_spec.first_party_image_override`, but keeping the field_behavior to avoid introducing breaking changes. The `deployment_source` field should not be set if `package_spec` is specified.""", + ) + service_account: Optional[str] = Field( + default=None, + description="""Optional. The service account that the Reasoning Engine artifact runs as. It should have "roles/storage.objectViewer" for reading the user project's Cloud Storage and "roles/aiplatform.user" for using Vertex extensions. If not specified, the Vertex AI Reasoning Engine Service Agent in the project will be used.""", + ) + source_code_spec: Optional[ReasoningEngineSpecSourceCodeSpec] = Field( + default=None, + description="""Deploy from source code files with a defined entrypoint.""", + ) + + +class ReasoningEngineSpecDict(TypedDict, total=False): + """The specification of an agent engine.""" + + agent_card: Optional[dict[str, Any]] + """Optional. The A2A Agent Card for the agent (if available). 
It follows the specification at https://a2a-protocol.org/latest/specification/#5-agent-discovery-the-agent-card.""" + + agent_framework: Optional[str] + """Optional. The OSS agent framework used to develop the agent. Currently supported values: "google-adk", "langchain", "langgraph", "ag2", "llama-index", "custom".""" + + class_methods: Optional[list[dict[str, Any]]] + """Optional. Declarations for object class methods in OpenAPI specification format.""" + + deployment_spec: Optional[ReasoningEngineSpecDeploymentSpecDict] + """Optional. The specification of a Reasoning Engine deployment.""" + + effective_identity: Optional[str] + """Output only. The identity to use for the Reasoning Engine. It can contain one of the following values: * service-{project}@gcp-sa-aiplatform-re.googleapis.com (for SERVICE_AGENT identity type) * {name}@{project}.gserviceaccount.com (for SERVICE_ACCOUNT identity type) * agents.global.{org}.system.id.goog/resources/aiplatform/projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine} (for AGENT_IDENTITY identity type)""" + + identity_type: Optional[IdentityType] + """Optional. The identity type to use for the Reasoning Engine. If not specified, the `service_account` field will be used if set, otherwise the default Vertex AI Reasoning Engine Service Agent in the project will be used.""" + + package_spec: Optional[ReasoningEngineSpecPackageSpecDict] + """Optional. User provided package spec of the ReasoningEngine. Ignored when users directly specify a deployment image through `deployment_spec.first_party_image_override`, but keeping the field_behavior to avoid introducing breaking changes. The `deployment_source` field should not be set if `package_spec` is specified.""" + + service_account: Optional[str] + """Optional. The service account that the Reasoning Engine artifact runs as. 
It should have "roles/storage.objectViewer" for reading the user project's Cloud Storage and "roles/aiplatform.user" for using Vertex extensions. If not specified, the Vertex AI Reasoning Engine Service Agent in the project will be used.""" + + source_code_spec: Optional[ReasoningEngineSpecSourceCodeSpecDict] + """Deploy from source code files with a defined entrypoint.""" + + +ReasoningEngineSpecOrDict = Union[ReasoningEngineSpec, ReasoningEngineSpecDict] + + +class ReasoningEngine(_common.BaseModel): + """An agent engine.""" + + encryption_spec: Optional[genai_types.EncryptionSpec] = Field( + default=None, + description="""Customer-managed encryption key spec for a ReasoningEngine. If set, this ReasoningEngine and all sub-resources of this ReasoningEngine will be secured by this key.""", + ) + context_spec: Optional[ReasoningEngineContextSpec] = Field( + default=None, + description="""Optional. Configuration for how Agent Engine sub-resources should manage context.""", + ) + create_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Timestamp when this ReasoningEngine was created.""", + ) + description: Optional[str] = Field( + default=None, + description="""Optional. The description of the ReasoningEngine.""", + ) + display_name: Optional[str] = Field( + default=None, + description="""Required. The display name of the ReasoningEngine.""", + ) + etag: Optional[str] = Field( + default=None, + description="""Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.""", + ) + labels: Optional[dict[str, str]] = Field( + default=None, description="""Labels for the ReasoningEngine.""" + ) + name: Optional[str] = Field( + default=None, + description="""Identifier. The resource name of the ReasoningEngine. 
Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`""", + ) + spec: Optional[ReasoningEngineSpec] = Field( + default=None, description="""Optional. Configurations of the ReasoningEngine""" + ) + update_time: Optional[datetime.datetime] = Field( + default=None, + description="""Output only. Timestamp when this ReasoningEngine was most recently updated.""", + ) + + +class ReasoningEngineDict(TypedDict, total=False): + """An agent engine.""" + + encryption_spec: Optional[genai_types.EncryptionSpecDict] + """Customer-managed encryption key spec for a ReasoningEngine. If set, this ReasoningEngine and all sub-resources of this ReasoningEngine will be secured by this key.""" + + context_spec: Optional[ReasoningEngineContextSpecDict] + """Optional. Configuration for how Agent Engine sub-resources should manage context.""" + + create_time: Optional[datetime.datetime] + """Output only. Timestamp when this ReasoningEngine was created.""" + + description: Optional[str] + """Optional. The description of the ReasoningEngine.""" + + display_name: Optional[str] + """Required. The display name of the ReasoningEngine.""" + + etag: Optional[str] + """Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.""" + + labels: Optional[dict[str, str]] + """Labels for the ReasoningEngine.""" + + name: Optional[str] + """Identifier. The resource name of the ReasoningEngine. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`""" + + spec: Optional[ReasoningEngineSpecDict] + """Optional. Configurations of the ReasoningEngine""" - memory_revision_default_ttl: Optional[str] - """Optional. The default TTL duration of the memory revisions in the Memory Bank. This applies to all operations that create a memory revision. If not set, a default TTL of 365 days will be used.""" + update_time: Optional[datetime.datetime] + """Output only. 
Timestamp when this ReasoningEngine was most recently updated.""" -ReasoningEngineContextSpecMemoryBankConfigTtlConfigOrDict = Union[ - ReasoningEngineContextSpecMemoryBankConfigTtlConfig, - ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict, -] +ReasoningEngineOrDict = Union[ReasoningEngine, ReasoningEngineDict] -class ReasoningEngineContextSpecMemoryBankConfig(_common.BaseModel): - """Specification for a Memory Bank.""" +class AgentEngineOperation(_common.BaseModel): + """Operation that has an agent engine as a response.""" - customization_configs: Optional[list[MemoryBankCustomizationConfig]] = Field( + name: Optional[str] = Field( default=None, - description="""Optional. Configuration for how to customize Memory Bank behavior for a particular scope.""", + description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""", ) - disable_memory_revisions: Optional[bool] = Field( + metadata: Optional[dict[str, Any]] = Field( default=None, - description="""If true, no memory revisions will be created for any requests to the Memory Bank.""", + description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""", ) - generation_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigGenerationConfig - ] = Field( + done: Optional[bool] = Field( default=None, - description="""Optional. Configuration for how to generate memories for the Memory Bank.""", + description="""If the value is `false`, it means the operation is still in progress. 
If `true`, the operation is completed, and either `error` or `response` is available.""", ) - similarity_search_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfig - ] = Field( + error: Optional[dict[str, Any]] = Field( default=None, - description="""Optional. Configuration for how to perform similarity search on memories. If not set, the Memory Bank will use the default embedding model `text-embedding-005`.""", + description="""The error result of the operation in case of failure or cancellation.""", ) - ttl_config: Optional[ReasoningEngineContextSpecMemoryBankConfigTtlConfig] = Field( - default=None, - description="""Optional. Configuration for automatic TTL ("time-to-live") of the memories in the Memory Bank. If not set, TTL will not be applied automatically. The TTL can be explicitly set by modifying the `expire_time` of each Memory resource.""", + response: Optional[ReasoningEngine] = Field( + default=None, description="""The created Agent Engine.""" ) -class ReasoningEngineContextSpecMemoryBankConfigDict(TypedDict, total=False): - """Specification for a Memory Bank.""" - - customization_configs: Optional[list[MemoryBankCustomizationConfigDict]] - """Optional. Configuration for how to customize Memory Bank behavior for a particular scope.""" - - disable_memory_revisions: Optional[bool] - """If true, no memory revisions will be created for any requests to the Memory Bank.""" - - generation_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigGenerationConfigDict - ] - """Optional. Configuration for how to generate memories for the Memory Bank.""" - - similarity_search_config: Optional[ - ReasoningEngineContextSpecMemoryBankConfigSimilaritySearchConfigDict - ] - """Optional. Configuration for how to perform similarity search on memories. If not set, the Memory Bank will use the default embedding model `text-embedding-005`.""" - - ttl_config: Optional[ReasoningEngineContextSpecMemoryBankConfigTtlConfigDict] - """Optional. 
Configuration for automatic TTL ("time-to-live") of the memories in the Memory Bank. If not set, TTL will not be applied automatically. The TTL can be explicitly set by modifying the `expire_time` of each Memory resource.""" - - -ReasoningEngineContextSpecMemoryBankConfigOrDict = Union[ - ReasoningEngineContextSpecMemoryBankConfig, - ReasoningEngineContextSpecMemoryBankConfigDict, -] - +class AgentEngineOperationDict(TypedDict, total=False): + """Operation that has an agent engine as a response.""" -class ReasoningEngineContextSpec(_common.BaseModel): - """The configuration for agent engine sub-resources to manage context.""" + name: Optional[str] + """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""" - memory_bank_config: Optional[ReasoningEngineContextSpecMemoryBankConfig] = Field( - default=None, - description="""Optional. Specification for a Memory Bank, which manages memories for the Agent Engine.""", - ) + metadata: Optional[dict[str, Any]] + """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""" + done: Optional[bool] + """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""" -class ReasoningEngineContextSpecDict(TypedDict, total=False): - """The configuration for agent engine sub-resources to manage context.""" + error: Optional[dict[str, Any]] + """The error result of the operation in case of failure or cancellation.""" - memory_bank_config: Optional[ReasoningEngineContextSpecMemoryBankConfigDict] - """Optional. 
Specification for a Memory Bank, which manages memories for the Agent Engine.""" + response: Optional[ReasoningEngineDict] + """The created Agent Engine.""" -ReasoningEngineContextSpecOrDict = Union[ - ReasoningEngineContextSpec, ReasoningEngineContextSpecDict -] +AgentEngineOperationOrDict = Union[AgentEngineOperation, AgentEngineOperationDict] class CreateAgentEngineConfig(_common.BaseModel): @@ -7727,132 +8012,6 @@ class _CreateAgentEngineRequestParametersDict(TypedDict, total=False): ] -class ReasoningEngine(_common.BaseModel): - """An agent engine.""" - - encryption_spec: Optional[genai_types.EncryptionSpec] = Field( - default=None, - description="""Customer-managed encryption key spec for a ReasoningEngine. If set, this ReasoningEngine and all sub-resources of this ReasoningEngine will be secured by this key.""", - ) - context_spec: Optional[ReasoningEngineContextSpec] = Field( - default=None, - description="""Optional. Configuration for how Agent Engine sub-resources should manage context.""", - ) - create_time: Optional[datetime.datetime] = Field( - default=None, - description="""Output only. Timestamp when this ReasoningEngine was created.""", - ) - description: Optional[str] = Field( - default=None, - description="""Optional. The description of the ReasoningEngine.""", - ) - display_name: Optional[str] = Field( - default=None, - description="""Required. The display name of the ReasoningEngine.""", - ) - etag: Optional[str] = Field( - default=None, - description="""Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.""", - ) - labels: Optional[dict[str, str]] = Field( - default=None, description="""Labels for the ReasoningEngine.""" - ) - name: Optional[str] = Field( - default=None, - description="""Identifier. The resource name of the ReasoningEngine. 
Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`""", - ) - spec: Optional[ReasoningEngineSpec] = Field( - default=None, description="""Optional. Configurations of the ReasoningEngine""" - ) - update_time: Optional[datetime.datetime] = Field( - default=None, - description="""Output only. Timestamp when this ReasoningEngine was most recently updated.""", - ) - - -class ReasoningEngineDict(TypedDict, total=False): - """An agent engine.""" - - encryption_spec: Optional[genai_types.EncryptionSpecDict] - """Customer-managed encryption key spec for a ReasoningEngine. If set, this ReasoningEngine and all sub-resources of this ReasoningEngine will be secured by this key.""" - - context_spec: Optional[ReasoningEngineContextSpecDict] - """Optional. Configuration for how Agent Engine sub-resources should manage context.""" - - create_time: Optional[datetime.datetime] - """Output only. Timestamp when this ReasoningEngine was created.""" - - description: Optional[str] - """Optional. The description of the ReasoningEngine.""" - - display_name: Optional[str] - """Required. The display name of the ReasoningEngine.""" - - etag: Optional[str] - """Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.""" - - labels: Optional[dict[str, str]] - """Labels for the ReasoningEngine.""" - - name: Optional[str] - """Identifier. The resource name of the ReasoningEngine. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`""" - - spec: Optional[ReasoningEngineSpecDict] - """Optional. Configurations of the ReasoningEngine""" - - update_time: Optional[datetime.datetime] - """Output only. 
Timestamp when this ReasoningEngine was most recently updated.""" - - -ReasoningEngineOrDict = Union[ReasoningEngine, ReasoningEngineDict] - - -class AgentEngineOperation(_common.BaseModel): - """Operation that has an agent engine as a response.""" - - name: Optional[str] = Field( - default=None, - description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""", - ) - metadata: Optional[dict[str, Any]] = Field( - default=None, - description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""", - ) - done: Optional[bool] = Field( - default=None, - description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""", - ) - error: Optional[dict[str, Any]] = Field( - default=None, - description="""The error result of the operation in case of failure or cancellation.""", - ) - response: Optional[ReasoningEngine] = Field( - default=None, description="""The created Agent Engine.""" - ) - - -class AgentEngineOperationDict(TypedDict, total=False): - """Operation that has an agent engine as a response.""" - - name: Optional[str] - """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""" - - metadata: Optional[dict[str, Any]] - """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. 
Any method that returns a long-running operation should document the metadata type, if any.""" - - done: Optional[bool] - """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""" - - error: Optional[dict[str, Any]] - """The error result of the operation in case of failure or cancellation.""" - - response: Optional[ReasoningEngineDict] - """The created Agent Engine.""" - - -AgentEngineOperationOrDict = Union[AgentEngineOperation, AgentEngineOperationDict] - - class DeleteAgentEngineConfig(_common.BaseModel): """Config for deleting agent engine.""" @@ -16049,6 +16208,75 @@ class AgentEngineConfigDict(TypedDict, total=False): AgentEngineConfigOrDict = Union[AgentEngineConfig, AgentEngineConfigDict] +class RunQueryJobAgentEngineConfig(_common.BaseModel): + """Config for running a query job on an agent engine.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request options.""" + ) + query: Optional[str] = Field( + default=None, description="""The query to send to the agent engine.""" + ) + gcs_bucket: Optional[str] = Field( + default=None, description="""The GCS bucket to use for the query.""" + ) + + +class RunQueryJobAgentEngineConfigDict(TypedDict, total=False): + """Config for running a query job on an agent engine.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + query: Optional[str] + """The query to send to the agent engine.""" + + gcs_bucket: Optional[str] + """The GCS bucket to use for the query.""" + + +RunQueryJobAgentEngineConfigOrDict = Union[ + RunQueryJobAgentEngineConfig, RunQueryJobAgentEngineConfigDict +] + + +class RunQueryJobResult(_common.BaseModel): + """Result of running a query job.""" + + http_options: Optional[genai_types.HttpOptions] = Field( + default=None, description="""Used to override HTTP request 
options.""" + ) + job_name: Optional[str] = Field( + default=None, + description="""Name of the agent engine operation to later check for status.""", + ) + input_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the input file.""" + ) + output_gcs_uri: Optional[str] = Field( + default=None, description="""The GCS URI of the output file.""" + ) + + +class RunQueryJobResultDict(TypedDict, total=False): + """Result of running a query job.""" + + http_options: Optional[genai_types.HttpOptionsDict] + """Used to override HTTP request options.""" + + job_name: Optional[str] + """Name of the agent engine operation to later check for status.""" + + input_gcs_uri: Optional[str] + """The GCS URI of the input file.""" + + output_gcs_uri: Optional[str] + """The GCS URI of the output file.""" + + +RunQueryJobResultOrDict = Union[RunQueryJobResult, RunQueryJobResultDict] + + class AssembleDataset(_common.BaseModel): """Represents the assembled dataset."""