diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml new file mode 100644 index 0000000..acdf77a --- /dev/null +++ b/.github/workflows/publish-to-pypi.yml @@ -0,0 +1,46 @@ +name: Publish Python Package + +on: + workflow_dispatch: + release: + types: [published] + +permissions: + contents: read + +jobs: + release-build: + runs-on: ubuntu-latest + # PyPI use trusted publisher + permissions: + id-token: write + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install uv + uv venv + make dev + - name: Build + run: | + make build + - name: Validate + run: | + WHEEL=$(ls dist/*.whl | head -n 1) + python -m pip install "${WHEEL}" + python -c "from oci_genai_auth import OciSessionAuth; from oci_genai_auth import OciUserPrincipalAuth; import oci_genai_auth;" + # - name: Publish to Test PyPI + # run: | + # python -m pip install twine + # twine check dist/* + # twine upload --verbose -r testpypi dist/* + - name: Publish to PyPI + run: | + python -m pip install twine + twine check dist/* + twine upload --verbose dist/* diff --git a/.github/workflows/run-all-check.yaml b/.github/workflows/run-all-check.yaml new file mode 100644 index 0000000..dc12f08 --- /dev/null +++ b/.github/workflows/run-all-check.yaml @@ -0,0 +1,39 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Unit test, format and lint check + +on: + workflow_dispatch: + pull_request: + branches: [ "main" ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version 
}} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install uv + uv venv + make dev + - name: Format and Lint + run: | + make check + - name: Test with pytest + run: | + make test + - name: Build + run: | + make build diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a46842d --- /dev/null +++ b/.gitignore @@ -0,0 +1,87 @@ +# Mac +.DS_Store + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[codz] +*$py.class + + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py.cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Environments +.env +.envrc +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +.idea/ +*.iml + +# Visual Studio Code +.vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Demo folder +.demo diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85ab22a..637430b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,3 @@ -*Detailed instructions on how to contribute to the project, if applicable. Must include section about Oracle Contributor Agreement with link and instructions* - # Contributing to this repository We welcome your contributions! There are multiple ways to contribute. diff --git a/LICENSE.txt b/LICENSE.txt index bb91ea7..92e1920 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2026 Oracle and/or its affiliates. 
+Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved. The Universal Permissive License (UPL), Version 1.0 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..89a7b74 --- /dev/null +++ b/Makefile @@ -0,0 +1,69 @@ +# Define the directory containing the source code +SRC_DIR := ./src +TEST_DIR := ./tests +EXAMPLE_DIR := ./examples + +# Optional install extras via `make install ` or `make build ` +ifneq (,$(filter install build,$(MAKECMDGOALS))) +EXTRAS := $(filter-out install build,$(MAKECMDGOALS)) +comma := , +empty := +space := $(empty) $(empty) +EXTRA_LIST := $(subst $(space),$(comma),$(strip $(EXTRAS))) +endif + +.PHONY: all +all: test lint build + +##@ General + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-24s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: install +install: ## Install project dependencies. Example: `make install openai` +ifneq ($(strip $(EXTRA_LIST)),) + uv pip install --editable ".[${EXTRA_LIST}]" +else + uv pip install --editable . +endif + +ifneq ($(strip $(EXTRAS)),) +.PHONY: $(EXTRAS) +$(EXTRAS): + @: +endif + +.PHONY: dev +dev: ## Install development dependencies. + uv pip install ".[dev]" + +.PHONY: test +test: ## Run tests. + uv run --no-project --no-reinstall pytest $(TEST_DIR) --cov --cov-config=.coveragerc -vv -s + +.PHONY: clean +clean: ## Remove build artifacts. + rm -rf build dist *.egg-info .pytest_cache .coverage + +.PHONY: format +format: ## Format code using ruff. + uv run --no-project --no-reinstall isort $(SRC_DIR) $(TEST_DIR) $(EXAMPLE_DIR) + uv run --no-project --no-reinstall ruff format $(SRC_DIR) $(TEST_DIR) $(EXAMPLE_DIR); uv run --no-project --no-reinstall ruff check --fix $(SRC_DIR) $(TEST_DIR) $(EXAMPLE_DIR) + +.PHONY: lint +lint: ## Run linters using ruff. 
+ uv run --no-project --no-reinstall ruff format --diff $(SRC_DIR) $(TEST_DIR) + uv run --no-project --no-reinstall mypy $(SRC_DIR) $(TEST_DIR) + +.PHONY: check +check: format lint ## Run format and lint. + +##@ Build + +.PHONY: build +build: ## Build the application. + uv build diff --git a/README.md b/README.md index 73e8102..e2a8b91 100644 --- a/README.md +++ b/README.md @@ -1,44 +1,117 @@ -*This repository acts as a template for all of Oracle’s GitHub repositories. It contains information about the guidelines for those repositories. All files and sections contained in this template are mandatory, and a GitHub app ensures alignment with these guidelines. To get started with a new repository, replace the italic paragraphs with the respective text for your project.* +# oci-genai-auth -# Project name +[![PyPI - Version](https://img.shields.io/pypi/v/oci-genai-auth.svg)](https://pypi.org/project/oci-genai-auth) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/oci-genai-auth.svg)](https://pypi.org/project/oci-genai-auth) -*Describe your project's features, functionality and target audience* +The **OCI GenAI Auth** Python library provides OCI request-signing helpers for the OpenAI-compatible REST APIs hosted by OCI Generative AI. Partner/Passthrough endpoints do not store conversation history on OCI servers, while AgentHub (non-passthrough) stores data on OCI-managed servers. -## Installation +## Table of Contents -*Provide detailed step-by-step installation instructions. 
You can name this section **How to Run** or **Getting Started** instead of **Installation** if that's more acceptable for your project* +- [Before you start](#before-you-start) +- [Using OCI IAM Auth](#using-oci-iam-auth) +- [Using API Key Auth](#using-api-key-auth) +- [Using AgentHub APIs (non-passthrough)](#using-agenthub-apis-non-passthrough) +- [Using Partner APIs (passthrough)](#using-partner-apis-passthrough) +- [Running the Examples](#running-the-examples) -## Documentation +## Before you start -*Developer-oriented documentation can be published on GitHub, but all product documentation must be published on * +**Important!** -## Examples +Note that this package, as well as the API keys package described below, only supports OpenAI, xAI Grok and Meta Llama models on OCI Generative AI. -*Describe any included examples or provide a link to a demo/tutorial* +Before you start using this package, determine if this is the right option for you. -## Help +If you are looking for a seamless way to port your code from an OpenAI-compatible endpoint to an OCI Generative AI endpoint, and you are currently using OpenAI-style API keys, you might want to use [OCI Generative AI API Keys](https://docs.oracle.com/en-us/iaas/Content/generative-ai/api-keys.htm) instead. -*Inform users on where to get help or how to receive official support from Oracle (if applicable)* +With OCI Generative AI API Keys, use the native `openai` SDK like before. Just update the `base_url`, create API keys in your OCI console, ensure the policy granting the key access to generative AI services is present and you are good to go. 
-## Contributing +- Create an API key in Console: **Generative AI** -> **API Keys** +- Create a security policy: **Identity & Security** -> **Policies** -*If your project has specific contribution requirements, update the CONTRIBUTING.md file to ensure those requirements are clearly explained* +To authorize a specific API Key +``` +allow any-user to use generative-ai-family in compartment where ALL { request.principal.type='generativeaiapikey', request.principal.id='ocid1.generativeaiapikey.oc1.us-chicago-1....' } +``` -This project welcomes contributions from the community. Before submitting a pull request, please [review our contribution guide](./CONTRIBUTING.md) +To authorize any API Key +``` +allow any-user to use generative-ai-family in compartment where ALL { request.principal.type='generativeaiapikey' } +``` -## Security +## Using OCI IAM Auth -Please consult the [security guide](./SECURITY.md) for our responsible security vulnerability disclosure process +Use OCI IAM auth when you want to sign requests with your OCI profile (session/user/resource/instance principal) instead of API keys. -## License +```python +import httpx +from openai import OpenAI +from oci_genai_auth import OciSessionAuth -*The correct copyright notice format for both documentation and software is* - "Copyright (c) [year,] year Oracle and/or its affiliates." -*You must include the year the content was first released (on any platform) and the most recent year in which it was revised* +client = OpenAI( + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/openai/v1", + api_key="not-used", + http_client=httpx.Client(auth=OciSessionAuth(profile_name="DEFAULT")), +) +``` -Copyright (c) 2026 Oracle and/or its affiliates. +## Using API Key Auth -*Replace this statement if your project is not licensed under the UPL* +Use OCI Generative AI API Keys if you want a direct API-key workflow with the OpenAI SDK. -Released under the Universal Permissive License v1.0 as shown at -. 
+```python +import os +from openai import OpenAI + +client = OpenAI( + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/openai/v1", + api_key=os.getenv("OPENAI_API_KEY"), +) +``` + +## Using AgentHub APIs (non-passthrough) + +AgentHub runs in non-pass-through mode and provides a unified interface for interacting with models and agentic capabilities. +It is compatible with OpenAI's Responses API and the Open Responses Spec, enabling developers/users to: build agents with OpenAI SDK. +Only the project OCID is required. + +```python +import httpx +from openai import OpenAI +from oci_genai_auth import OciSessionAuth + +client = OpenAI( + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/openai/v1", + api_key="not-used", + project="ocid1.generativeaiproject.oc1.us-chicago-1.aaaaaaaaexample", + http_client=httpx.Client(auth=OciSessionAuth(profile_name="DEFAULT")), +) +``` + +## Using Partner APIs (passthrough) + +Partner endpoints run in pass-through mode and require the compartment OCID header. + +```python +import httpx +from openai import OpenAI +from oci_genai_auth import OciSessionAuth + +client = OpenAI( + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/v1", + api_key="not-used", + default_headers={ + "opc-compartment-id": "ocid1.compartment.oc1..aaaaaaaaexample", + }, + http_client=httpx.Client(auth=OciSessionAuth(profile_name="DEFAULT")), +) +``` + + +## Running the Examples + +1. Update `examples/agenthub/openai/common.py` and/or `examples/partner/openai/common.py` with your `COMPARTMENT_ID`, `PROJECT_OCID`, and set the correct `REGION`. +2. Set the `OPENAI_API_KEY` environment variable when an example uses API key authentication. +3. Install optional dev dependencies: `pip install -e '.[dev]'`. + +Run an example either by calling its `main()` method or from the command line. 
diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt new file mode 100644 index 0000000..239402c --- /dev/null +++ b/THIRD_PARTY_LICENSES.txt @@ -0,0 +1,262 @@ +OCI OpenAI Client for Python Third Party License File + +------------------------ Third Party Components ------------------------ +------------------------------- Licenses ------------------------------- +- Apache License 2.0 +- MIT License +- Python Software Foundation License 2.0 (PSF-2.0) +- Universal Permissive License (UPL) + +======================== Third Party Components ======================== +httpx +* Copyright (c) 2021 ProjectDiscovery, Inc. +* License: MIT License +* Source code: https://github.com/projectdiscovery/httpx +* Project home: https://github.com/projectdiscovery/httpx + +oci +* Copyright (c) 2016, 2020, Oracle and/or its affiliates. [see notices section above] +* License: Apache License 2.0, Universal Permissive License (UPL) +* Source code: https://github.com/oracle/oci-python-sdk +* Project home: https://docs.oracle.com/en-us/iaas/tools/python/2.44.0/index.html + +requests +* Copyright 2023 Kenneth Reitz +* License: Apache-2.0 license +* Source code: https://github.com/psf/requests +* Project home: https://requests.readthedocs.io/en/latest/ + +=============================== Licenses =============================== + +------------------------------ MIT License ----------------------------- +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------ + +------------------Universal Permissive License (UPL) ------------------- + +Subject to the condition set forth below, permission is hereby granted to any person +obtaining a copy of this software, associated documentation and/or data (collectively +the "Software"), free of charge and under any and all copyright rights in the Software, +and any and all patent rights owned or freely licensable by each licensor hereunder +covering either (i) the unmodified Software as contributed to or provided by such +licensor, or (ii) the Larger Works (as defined below), to deal in both + +(a) the Software, and +(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is +included with the Software (each a "Larger Work" to which the Software is contributed +by such licensors), + +without restriction, including without limitation the rights to copy, create derivative +works of, display, perform, and distribute the Software and make, use, sell, offer for +sale, import, export, have made, and have sold the Software and the Larger Work(s), and +to sublicense the foregoing rights on either these or other terms. + +This license is subject to the following condition: + +The above copyright notice and either this complete permission notice or at a minimum a +reference to the UPL must be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------ + +-------------------------- Apache License 2.0 -------------------------- + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS +------------------------------------------------------------------------ diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/__init__.py b/examples/agenthub/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/__init__.py b/examples/agenthub/openai/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/agents/basic_agents_example.py b/examples/agenthub/openai/agents/basic_agents_example.py new file mode 100644 index 0000000..144389a --- /dev/null +++ b/examples/agenthub/openai/agents/basic_agents_example.py @@ -0,0 +1,28 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + + +"""Demonstrates running an OpenAI Agents workflow against the AgentHub endpoint.""" + +import asyncio + +from agents import Agent, Runner, set_default_openai_client, trace + +from examples.agenthub.openai import common + +MODEL = "openai.gpt-4o" + +# Set the OCI OpenAI Client as the default client to use with OpenAI Agents +set_default_openai_client(common.build_agenthub_async_client()) + + +async def main(): + agent = Agent(name="Assistant", instructions="You are a helpful assistant", model=MODEL) + # https://openai.github.io/openai-agents-python/models/#tracing-client-error-401 + with trace("Trace workflow"): + result = await Runner.run(agent, "Write a haiku about recursion in programming.") + print(result.final_output) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/agenthub/openai/common.py b/examples/agenthub/openai/common.py new file mode 100644 index 0000000..67b18ea --- /dev/null +++ b/examples/agenthub/openai/common.py @@ -0,0 +1,50 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""AgentHub example clients and configuration.""" + +from __future__ import annotations + +import os + +import httpx +from openai import AsyncOpenAI, OpenAI + +from oci_genai_auth import OciSessionAuth + +# Shared defaults. 
+PROFILE_NAME = "DEFAULT" +COMPARTMENT_ID = "<>" +PROJECT_OCID = "<>" +REGION = "us-chicago-1" + +AGENTHUB_OPENAI_URL = f"https://inference.generativeai.{REGION}.oci.oraclecloud.com/openai/v1" +AGENTHUB_OPENAI_CP_URL = f"https://generativeai.{REGION}.oci.oraclecloud.com/20231130/openai/v1" + + +def build_agenthub_client() -> OpenAI: + return OpenAI( + base_url=AGENTHUB_OPENAI_URL, + api_key=os.getenv("OCI_GENAI_API_KEY", "not-used"), + project=os.getenv("OCI_GENAI_PROJECT_ID", PROJECT_OCID), + http_client=httpx.Client(auth=OciSessionAuth(profile_name=PROFILE_NAME)), + ) + + +def build_agenthub_async_client() -> AsyncOpenAI: + return AsyncOpenAI( + base_url=AGENTHUB_OPENAI_URL, + api_key=os.getenv("OCI_GENAI_API_KEY", "not-used"), + project=os.getenv("OCI_GENAI_PROJECT_ID", PROJECT_OCID), + http_client=httpx.AsyncClient(auth=OciSessionAuth(profile_name=PROFILE_NAME)), + ) + + +def build_agenthub_cp_client() -> OpenAI: + return OpenAI( + base_url=AGENTHUB_OPENAI_CP_URL, + api_key=os.getenv("OCI_GENAI_API_KEY", "not-used"), + http_client=httpx.Client(auth=OciSessionAuth(profile_name=PROFILE_NAME)), + project=os.getenv("OCI_GENAI_PROJECT_ID", PROJECT_OCID), + default_headers={"opc-compartment-id": COMPARTMENT_ID}, + ) \ No newline at end of file diff --git a/examples/agenthub/openai/converstions/conversation_items_crud.py b/examples/agenthub/openai/converstions/conversation_items_crud.py new file mode 100644 index 0000000..0b1e210 --- /dev/null +++ b/examples/agenthub/openai/converstions/conversation_items_crud.py @@ -0,0 +1,52 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates CRUD operations for conversation items in AgentHub.""" + +from examples.agenthub.openai import common + + +def main(): + # Create an empty conversation + cp_client = common.build_agenthub_client() + conversation = cp_client.conversations.create() + print("\nCreated conversation:", conversation) + + # Create items in the conversation + cp_client.conversations.items.create( + conversation_id=conversation.id, + items=[ + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "What's your name?"}], + }, + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "What's your favorite color?"}], + }, + ], + ) + + # List the items in the conversation after creating items + items = cp_client.conversations.items.list( + conversation_id=conversation.id, + ) + print("\nConversation items after creating items:", items.data) + + # Delete an item from the conversation + cp_client.conversations.items.delete( + conversation_id=conversation.id, + item_id=items.data[0].id, + ) + + # List the items in the conversation after deleting an item + items = cp_client.conversations.items.list( + conversation_id=conversation.id, + ) + print("\nConversation items after deleting an item:", items.data) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/converstions/conversations_crud.py b/examples/agenthub/openai/converstions/conversations_crud.py new file mode 100644 index 0000000..25065b3 --- /dev/null +++ b/examples/agenthub/openai/converstions/conversations_crud.py @@ -0,0 +1,46 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates CRUD operations for conversations in AgentHub.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + # Create a conversation + conversation = client.conversations.create( + items=[ + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": "Hello!"}], + } + ], + metadata={"topic": "demo"}, + ) + print("\nCreated conversation:", conversation) + + # Retrieve the conversation + conversation = client.conversations.retrieve( + conversation_id=conversation.id, + ) + print("\nRetrieved conversation:", conversation) + + # Update the conversation with new metadata + conversation = client.conversations.update( + conversation_id=conversation.id, + metadata={"topic": "demo2"}, + ) + print("\nUpdated conversation:", conversation) + + # Delete the conversation + client.conversations.delete( + conversation_id=conversation.id, + ) + print("\nDeleted conversation:", conversation) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/files/__init__.py b/examples/agenthub/openai/files/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/files/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/files/files_crud.py b/examples/agenthub/openai/files/files_crud.py new file mode 100644 index 0000000..e0c97a7 --- /dev/null +++ b/examples/agenthub/openai/files/files_crud.py @@ -0,0 +1,37 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates CRUD operations for files.""" + +from pathlib import Path + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + # List files in the project + files_list = client.files.list(order="asc") + for file in files_list.data: + print(f"ID: {file.id:<45} Status:{file.status:<10} Name:{file.filename}") + + pdf_file_path = Path(__file__).parent / "sample_doc.pdf" + + # Upload a file + with open(pdf_file_path, "rb") as f: + file = client.files.create(file=f, purpose="user_data") + print("Uploaded file:", file) + + # Retrieve file metadata + retrieved_result = client.files.retrieve(file_id=file.id) + print("\nRetrieved file:", retrieved_result) + print("\nWaiting for file to get processed") + client.files.wait_for_processing(file.id) + + # Delete file + delete_result = client.files.delete(file_id=file.id) + print("\nDelete result:", delete_result) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/files/sample_doc.pdf b/examples/agenthub/openai/files/sample_doc.pdf new file mode 100644 index 0000000..7d0fb33 Binary files /dev/null and b/examples/agenthub/openai/files/sample_doc.pdf differ diff --git a/examples/agenthub/openai/function/create_response_fc_parallel_tool.py b/examples/agenthub/openai/function/create_response_fc_parallel_tool.py new file mode 100644 index 0000000..f5b3fae --- /dev/null +++ b/examples/agenthub/openai/function/create_response_fc_parallel_tool.py @@ -0,0 +1,38 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates parallel function-calling tools with the Responses API."""
+
+from rich import print
+
+from examples.agenthub.openai import common
+from examples.fc_tools import fc_tools
+
+MODEL = "openai.gpt-4.1"
+
+
+def main():
+    openai_client = common.build_agenthub_client()
+    # Parallel tool calls enabled (default)
+    response = openai_client.responses.create(
+        model=MODEL,
+        input="what is the weather in seattle and in new York?",
+        previous_response_id=None,  # root of the history
+        tools=fc_tools,
+    )
+    print(response.output)
+
+    # Parallel tool calls disabled
+
+    response = openai_client.responses.create(
+        model=MODEL,
+        input="what is the weather in seattle and in new York?",
+        previous_response_id=None,  # root of the history
+        tools=fc_tools,
+        parallel_tool_calls=False,
+    )
+    print(response.output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/agenthub/openai/function/create_responses_fc.py b/examples/agenthub/openai/function/create_responses_fc.py
new file mode 100644
index 0000000..5de83d3
--- /dev/null
+++ b/examples/agenthub/openai/function/create_responses_fc.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates function-calling responses with multiple tools.""" + +import json + +from openai.types.responses import ResponseFunctionToolCall +from openai.types.responses.response_input_param import FunctionCallOutput +from rich import print + +from examples.agenthub.openai import common +from examples.fc_tools import execute_function_call, fc_tools + +MODEL = "openai.gpt-4.1" + + +def main(): + openai_client = common.build_agenthub_client() + + # Creates first request + response = openai_client.responses.create( + model=MODEL, + input="what is the weather in seattle?", + previous_response_id=None, # root of the history + tools=fc_tools, + ) + print(response.output) + + # Based on output if it is function call, execute the function and provide output back + if isinstance(response.output[0], ResponseFunctionToolCall): + obj = response.output[0] + function_name = obj.name + function_args = json.loads(obj.arguments) + + function_response = execute_function_call(function_name, function_args) + + response = openai_client.responses.create( + model=MODEL, + input=[ + FunctionCallOutput( + type="function_call_output", + call_id=obj.call_id, + output=str(function_response), + ) + ], + previous_response_id=response.id, + tools=fc_tools, + ) + print(response.output) + + # Ask followup question related to previoud context + response = openai_client.responses.create( + model=MODEL, + input="what clothes should i wear in this weather?", + previous_response_id=response.id, + tools=fc_tools, + ) + print(response.output) + + # Based on FCTool execute the function tool output + if isinstance(response.output[0], ResponseFunctionToolCall): + obj = response.output[0] + function_name = obj.name + function_args = json.loads(obj.arguments) + + function_response = execute_function_call(function_name, function_args) + + response = openai_client.responses.create( + model=MODEL, + 
input=[ + FunctionCallOutput( + type="function_call_output", + call_id=obj.call_id, + output=str(function_response), + ) + ], + previous_response_id=response.id, + tools=fc_tools, + ) + print(response.output) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/mcp/create_response_approval_flow.py b/examples/agenthub/openai/mcp/create_response_approval_flow.py new file mode 100644 index 0000000..4e248d4 --- /dev/null +++ b/examples/agenthub/openai/mcp/create_response_approval_flow.py @@ -0,0 +1,46 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates an MCP approval flow before executing tools.""" + +from rich import print + +from examples.agenthub.openai import common + +MODEL = "openai.gpt-4.1" + + +def main(): + openai_client = common.build_agenthub_client() + + tools = [ + { + "type": "mcp", + "server_label": "deepwiki", + "require_approval": "always", + "server_url": "https://mcp.deepwiki.com/mcp", + } + ] + response1 = openai_client.responses.create( + model=MODEL, input="please tell me structure about facebook/react", tools=tools, store=True + ) + + print(response1.output) + + approve_id = response1.output[1].id + id = response1.id + + approval_response = { + "type": "mcp_approval_response", + "approval_request_id": approve_id, + "approve": True, + } + + response2 = openai_client.responses.create( + model=MODEL, input=[approval_response], tools=tools, previous_response_id=id + ) + print(response2.output) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/mcp/create_response_two_mcp.py b/examples/agenthub/openai/mcp/create_response_two_mcp.py new file mode 100644 index 0000000..4564d53 --- /dev/null +++ b/examples/agenthub/openai/mcp/create_response_two_mcp.py @@ -0,0 +1,43 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates a two-step MCP response flow."""
+
+from rich import print
+
+from examples.agenthub.openai import common
+
+MODEL = "openai.gpt-4.1"
+
+
+def main():
+    openai_client = common.build_agenthub_client()
+
+    tools = [
+        {
+            "type": "mcp",
+            "server_label": "stripe",
+            "require_approval": "never",
+            "server_url": "https://mcp.stripe.com",
+            "authorization": "",
+        },
+        {
+            "type": "mcp",
+            "server_label": "deepwiki",
+            "require_approval": "never",
+            "server_url": "https://mcp.deepwiki.com/mcp",
+        },
+    ]
+    response1 = openai_client.responses.create(
+        model=MODEL,
+        input="Please use stripe create account with a and a@g.com and "
+        "use deepwiki understand facebook/react",
+        tools=tools,
+        store=True,
+    )
+
+    print(response1.output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/agenthub/openai/mcp/create_responses_mcp.py b/examples/agenthub/openai/mcp/create_responses_mcp.py
new file mode 100644
index 0000000..469fcd6
--- /dev/null
+++ b/examples/agenthub/openai/mcp/create_responses_mcp.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates creating responses with MCP tools.""" + +from rich import print + +from examples.agenthub.openai import common + +MODEL = "openai.gpt-4.1" + + +def main(): + openai_client = common.build_agenthub_client() + + tools = [ + { + "type": "mcp", + "server_label": "deepwiki", + "require_approval": "always", + "server_url": "https://mcp.deepwiki.com/mcp", + } + ] + response1 = openai_client.responses.create( + model=MODEL, input="please tell me structure about facebook/react", tools=tools, store=True + ) + + print(response1.output) + + approve_id = response1.output[1].id + id = response1.id + + approval_response = { + "type": "mcp_approval_response", + "approval_request_id": approve_id, + "approve": True, + } + + response2 = openai_client.responses.create( + model=MODEL, input=[approval_response], tools=tools, previous_response_id=id + ) + print(response2.output) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/mcp/create_responses_mcp_auth.py b/examples/agenthub/openai/mcp/create_responses_mcp_auth.py new file mode 100644 index 0000000..42b3a31 --- /dev/null +++ b/examples/agenthub/openai/mcp/create_responses_mcp_auth.py @@ -0,0 +1,36 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates creating responses with MCP auth.""" + +from rich import print + +from examples.agenthub.openai import common + +MODEL = "openai.gpt-4.1" + + +def main(): + openai_client = common.build_agenthub_client() + + tools = [ + { + "type": "mcp", + "server_label": "stripe", + "require_approval": "never", + "server_url": "https://mcp.stripe.com", + "authorization": "", + } + ] + response1 = openai_client.responses.create( + model=MODEL, + input="Please use stirpe create account with a and a@g.com", + tools=tools, + store=True, + ) + + print(response1.output) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/memory/__init__.py b/examples/agenthub/openai/memory/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/memory/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/memory/long_term_memory.py b/examples/agenthub/openai/memory/long_term_memory.py new file mode 100644 index 0000000..82d63c1 --- /dev/null +++ b/examples/agenthub/openai/memory/long_term_memory.py @@ -0,0 +1,45 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates long-term memory usage in AgentHub.""" + +import time + +from examples.agenthub.openai import common + +MODEL = "xai.grok-4-1-fast-reasoning" + + +def main(): + client = common.build_agenthub_client() + # First conversation - store preferences + conversation1 = client.conversations.create( + metadata={"memory_subject_id": "user_123456"}, + ) + + response = client.responses.create( + model=MODEL, + input="I like Fish. 
I don't like Shrimp.", + conversation=conversation1.id, + ) + print("Response 1:", response.output_text) + + # Delay for long-term memory processing + print("Waiting for long-term memory processing...") + time.sleep(10) + + # Second conversation - recall preferences + conversation2 = client.conversations.create( + metadata={"memory_subject_id": "user_123456"}, + ) + + response = client.responses.create( + model=MODEL, + input="What do I like?", + conversation=conversation2.id, + ) + print("Response 2:", response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/memory/long_term_memory_access_policy.py b/examples/agenthub/openai/memory/long_term_memory_access_policy.py new file mode 100644 index 0000000..4f64c65 --- /dev/null +++ b/examples/agenthub/openai/memory/long_term_memory_access_policy.py @@ -0,0 +1,51 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates long-term memory access policies in AgentHub.""" + +import time + +from examples.agenthub.openai import common + +MODEL = "openai.gpt-5.1" + + +def main(): + client = common.build_agenthub_client() + # First conversation - store only (no recall) + conversation1 = client.conversations.create( + metadata={ + "memory_subject_id": "user_123456", + "memory_access_policy": "store_only", + }, + ) + + response = client.responses.create( + model=MODEL, + input="I like Fish. 
I don't like Shrimp.", + conversation=conversation1.id, + ) + print("Response 1:", response.output_text) + + # Delay for long-term memory processing + print("Waiting for long-term memory processing...") + time.sleep(20) + + # Second conversation - recall only (no new storage) + conversation2 = client.conversations.create( + metadata={ + "memory_subject_id": "user_123456", + "memory_access_policy": "recall_only", + }, + ) + + response = client.responses.create( + model=MODEL, + input="What food do I like?", + conversation=conversation2.id, + ) + print("Response 2:", response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/memory/short_term_memory_optimization.py b/examples/agenthub/openai/memory/short_term_memory_optimization.py new file mode 100644 index 0000000..c201727 --- /dev/null +++ b/examples/agenthub/openai/memory/short_term_memory_optimization.py @@ -0,0 +1,52 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates short-term memory optimization in AgentHub.""" + +from examples.agenthub.openai import common + +MODEL = "xai.grok-4-1-fast-reasoning" + + +def main(): + client = common.build_agenthub_client() + # Create a conversation with STMO enabled + conversation = client.conversations.create( + metadata={"topic": "demo", "short_term_memory_optimization": "True"}, + items=[{"type": "message", "role": "user", "content": "Hello!"}], + ) + + # Multiple turns - STMO will auto-condense the history + response = client.responses.create( + model=MODEL, + input="I like Fish.", + conversation=conversation.id, + ) + print("Turn 1:", response.output_text) + + response = client.responses.create( + model=MODEL, + input="I like Beef.", + conversation=conversation.id, + ) + print("Turn 2:", response.output_text) + + response = client.responses.create( + model=MODEL, + input="I like ice-cream.", + 
conversation=conversation.id, + ) + print("Turn 3:", response.output_text) + + response = client.responses.create( + model=MODEL, + input="I like coffee.", + conversation=conversation.id, + ) + print("Turn 4:", response.output_text) + + # The STMO summary will be generated automatically + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/multiturn/__init__.py b/examples/agenthub/openai/multiturn/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/multiturn/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/multiturn/conversations_api.py b/examples/agenthub/openai/multiturn/conversations_api.py new file mode 100644 index 0000000..df9ea79 --- /dev/null +++ b/examples/agenthub/openai/multiturn/conversations_api.py @@ -0,0 +1,36 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates a multi-turn flow using the Conversations API.""" + +from examples.agenthub.openai import common + +MODEL = "xai.grok-4-1-fast-reasoning" + + +def main(): + client = common.build_agenthub_client() + + # Create a conversation upfront + conversation = client.conversations.create(metadata={"topic": "demo"}) + print("Conversation ID:", conversation.id) + + # First turn + response1 = client.responses.create( + model=MODEL, + input="Tell me a joke. 
Keep it short.", + conversation=conversation.id, + ) + print("Response 1:", response1.output_text) + + # Second turn on the same conversation + response2 = client.responses.create( + model=MODEL, + input="Why is it funny?", + conversation=conversation.id, + ) + print("Response 2:", response2.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/multiturn/responses_chaining.py b/examples/agenthub/openai/multiturn/responses_chaining.py new file mode 100644 index 0000000..d78af26 --- /dev/null +++ b/examples/agenthub/openai/multiturn/responses_chaining.py @@ -0,0 +1,30 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates chaining responses across multiple turns.""" + +from examples.agenthub.openai import common + +model = "xai.grok-4-1-fast-reasoning" + + +def main(): + client = common.build_agenthub_client() + # First turn + response1 = client.responses.create( + model=model, + input="Tell me a joke. Keep it short.", + ) + print("Response 1:", response1.output_text) + + # Second turn, chaining to the first + response2 = client.responses.create( + model=model, + input="Why is it funny?", + previous_response_id=response1.id, + ) + print("Response 2:", response2.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/quickstart_responses_create_api_key.py b/examples/agenthub/openai/quickstart_responses_create_api_key.py new file mode 100644 index 0000000..10c53f3 --- /dev/null +++ b/examples/agenthub/openai/quickstart_responses_create_api_key.py @@ -0,0 +1,36 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Quickstart using Generative AI API Key authentication. + +This example uses the native OpenAI client with OCI Generative AI API Key. 
+No oci-genai-auth package needed for API Key auth - just the official OpenAI SDK. + +Steps: + 1. Create a Generative AI Project on OCI Console + 2. Create a Generative AI API Key on OCI Console + 3. Run this script +""" + +import os + +from openai import OpenAI + +from examples.agenthub.openai.common import PROJECT_OCID + + +def main(): + client = OpenAI( + api_key=os.getenv("OPENAI_API_KEY"), + project=os.getenv("OCI_GENAI_PROJECT_ID", PROJECT_OCID), + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/openai/v1", + ) + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/quickstart_responses_create_oci_iam.py b/examples/agenthub/openai/quickstart_responses_create_oci_iam.py new file mode 100644 index 0000000..e3494a4 --- /dev/null +++ b/examples/agenthub/openai/quickstart_responses_create_oci_iam.py @@ -0,0 +1,28 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Quickstart using OCI IAM authentication. + +This example uses oci-genai-auth with the OpenAI SDK for AgentHub. + +Steps: + 1. Create a Generative AI Project on OCI Console + 2. pip install oci-genai-auth + 3. 
Run this script +""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/Cat.jpg b/examples/agenthub/openai/responses/Cat.jpg new file mode 100644 index 0000000..6d89427 Binary files /dev/null and b/examples/agenthub/openai/responses/Cat.jpg differ diff --git a/examples/agenthub/openai/responses/__init__.py b/examples/agenthub/openai/responses/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/responses/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/responses/create_response.py b/examples/agenthub/openai/responses/create_response.py new file mode 100644 index 0000000..c44bafc --- /dev/null +++ b/examples/agenthub/openai/responses/create_response.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates creating a response with the Responses API on AgentHub.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/create_response_store_false.py b/examples/agenthub/openai/responses/create_response_store_false.py new file mode 100644 index 0000000..0b10267 --- /dev/null +++ b/examples/agenthub/openai/responses/create_response_store_false.py @@ -0,0 +1,25 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates creating a response with storage disabled using the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + store=False, + ) + print(response.output_text) + + # Try to retrieve the response by ID, and it should throw openai.NotFoundError + retrieved = client.responses.retrieve(response_id=response.id) + print(f"Response: {retrieved}") + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/delete_response.py b/examples/agenthub/openai/responses/delete_response.py new file mode 100644 index 0000000..d2d0553 --- /dev/null +++ b/examples/agenthub/openai/responses/delete_response.py @@ -0,0 +1,25 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates deleting a response from the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + # Create a response first + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print("Created response ID:", response.id) + + # Delete the response by ID + client.responses.delete(response_id=response.id) + print("Deleted response:", response.id) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/file_input_with_file_id.py b/examples/agenthub/openai/responses/file_input_with_file_id.py new file mode 100644 index 0000000..47da9dd --- /dev/null +++ b/examples/agenthub/openai/responses/file_input_with_file_id.py @@ -0,0 +1,42 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates providing file input by file ID to the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + # Upload a file first + with open("../files/sample_doc.pdf", "rb") as f: + file = client.files.create( + file=f, + purpose="user_data", + ) + + # Use the file in a response + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input=[ + { + "role": "user", + "content": [ + { + "type": "input_file", + "file_id": file.id, + }, + { + "type": "input_text", + "text": "What's discussed in the file?", + }, + ], + } + ], + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/file_input_with_file_url.py b/examples/agenthub/openai/responses/file_input_with_file_url.py new file mode 100644 index 0000000..7a00e79 --- /dev/null +++ 
b/examples/agenthub/openai/responses/file_input_with_file_url.py @@ -0,0 +1,35 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates providing file input by URL to the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + store=False, + input=[ + { + "role": "user", + "content": [ + { + "type": "input_text", + "text": "Analyze the letter and provide a summary of the key points.", + }, + { + "type": "input_file", + "file_url": "https://www.berkshirehathaway.com/letters/2024ltr.pdf", + }, + ], + } + ], + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/get_response.py b/examples/agenthub/openai/responses/get_response.py new file mode 100644 index 0000000..adde2a4 --- /dev/null +++ b/examples/agenthub/openai/responses/get_response.py @@ -0,0 +1,25 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates retrieving a response from the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + # Create a response first + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print("Created response ID:", response.id) + + # Retrieve the response by ID + retrieved = client.responses.retrieve(response_id=response.id) + print("Retrieved response:", retrieved.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/image_input_with_base64.py b/examples/agenthub/openai/responses/image_input_with_base64.py new file mode 100644 index 0000000..e9be2b0 --- /dev/null +++ b/examples/agenthub/openai/responses/image_input_with_base64.py @@ -0,0 +1,48 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates providing image input via base64 to the Responses API.""" + +import base64 +from pathlib import Path + +from examples.agenthub.openai import common + + +def encode_image(image_path): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") + + +def main(): + client = common.build_agenthub_client() + + # assuming the file "Cat.jpg" is in the same directory as this script + image_file_path = Path(__file__).parent / "Cat.jpg" + base64_image = encode_image(image_file_path) + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + store=False, + input=[ + { + "role": "user", + "content": [ + { + "type": "input_text", + "text": "What's in this image?", + }, + { + "type": "input_image", + "image_url": f"data:image/jpeg;base64,{base64_image}", + "detail": "high", + }, + ], + } + ], + ) + 
print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/image_input_with_url.py b/examples/agenthub/openai/responses/image_input_with_url.py new file mode 100644 index 0000000..6207edc --- /dev/null +++ b/examples/agenthub/openai/responses/image_input_with_url.py @@ -0,0 +1,35 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates providing image input via URL to the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + store=False, + input=[ + { + "role": "user", + "content": [ + { + "type": "input_text", + "text": "What's in this image?", + }, + { + "type": "input_image", + "image_url": "https://picsum.photos/id/237/200/300", + }, + ], + } + ], + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/reasoning.py b/examples/agenthub/openai/responses/reasoning.py new file mode 100644 index 0000000..7224283 --- /dev/null +++ b/examples/agenthub/openai/responses/reasoning.py @@ -0,0 +1,34 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates a reasoning-style Responses API request.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + prompt = """ + Write a bash script that takes a matrix represented as a string with + format '[1,2],[3,4],[5,6]' and prints the transpose in the same format. 
+ """ + + response = client.responses.create( + model="openai.gpt-oss-120b", + input=prompt, + reasoning={"effort": "medium", "summary": "detailed"}, + stream=True, + ) + for event in response: + if event.type == "response.reasoning_summary_part.added": + print("Thinking...") + if event.type == "response.reasoning_summary_text.delta": + print(event.delta, end="", flush=True) + if event.type == "response.output_text.delta": + print(event.delta, end="", flush=True) + print() + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/streaming_text_delta.py b/examples/agenthub/openai/responses/streaming_text_delta.py new file mode 100644 index 0000000..5180be2 --- /dev/null +++ b/examples/agenthub/openai/responses/streaming_text_delta.py @@ -0,0 +1,26 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates streaming Responses API output and handling text deltas.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response_stream = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What are the shapes of OCI GPUs?", + stream=True, + ) + + for event in response_stream: + if event.type == "response.output_text.delta": + print(event.delta, end="", flush=True) + + print() + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/structured_output.py b/examples/agenthub/openai/responses/structured_output.py new file mode 100644 index 0000000..0633ad2 --- /dev/null +++ b/examples/agenthub/openai/responses/structured_output.py @@ -0,0 +1,41 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates structured output with the Responses API.""" + +from pydantic import BaseModel + +from examples.agenthub.openai import common + + +class CalendarEvent(BaseModel): + name: str + date: str + participants: list[str] + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.parse( + model="xai.grok-4-1-fast-reasoning", + input=[ + { + "role": "system", + "content": "Extract the event information.", + }, + { + "role": "user", + "content": "Alice and Bob are going to a science fair on Friday.", + }, + ], + store=False, + text_format=CalendarEvent, + ) + + event = response.output_parsed + print(event) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/use_gpt_model.py b/examples/agenthub/openai/responses/use_gpt_model.py new file mode 100644 index 0000000..fb76f1c --- /dev/null +++ b/examples/agenthub/openai/responses/use_gpt_model.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates using a GPT model with the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="openai.gpt-5.2", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/use_gptoss_model_dac.py b/examples/agenthub/openai/responses/use_gptoss_model_dac.py new file mode 100644 index 0000000..ed8764b --- /dev/null +++ b/examples/agenthub/openai/responses/use_gptoss_model_dac.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates using a GPT OSS DAC model with the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/use_gptoss_model_ondemand.py b/examples/agenthub/openai/responses/use_gptoss_model_ondemand.py new file mode 100644 index 0000000..25400d1 --- /dev/null +++ b/examples/agenthub/openai/responses/use_gptoss_model_ondemand.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates using a GPT OSS on-demand model with the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="openai.gpt-oss-120b", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/responses/use_grok_model.py b/examples/agenthub/openai/responses/use_grok_model.py new file mode 100644 index 0000000..9d2cab4 --- /dev/null +++ b/examples/agenthub/openai/responses/use_grok_model.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates using a Grok model with the Responses API.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What is 2x2?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/__init__.py b/examples/agenthub/openai/tools/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/tools/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/tools/code_interpreter.py b/examples/agenthub/openai/tools/code_interpreter.py new file mode 100644 index 0000000..e749d28 --- /dev/null +++ b/examples/agenthub/openai/tools/code_interpreter.py @@ -0,0 +1,27 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates the code_interpreter tool in AgentHub.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + tools=[ + { + "type": "code_interpreter", + "container": {"type": "auto", "memory_limit": "4g"}, + } + ], + instructions="Write and run code using the python tool to answer the question.", + input="I need to solve the equation 3x + 11 = 14. 
Can you help me?", + ) + print(response) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/file_search.py b/examples/agenthub/openai/tools/file_search.py new file mode 100644 index 0000000..573a723 --- /dev/null +++ b/examples/agenthub/openai/tools/file_search.py @@ -0,0 +1,28 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates the file_search tool in AgentHub.""" + +from examples.agenthub.openai import common + +VECTOR_STORE_ID = "<>" + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + input="What are shapes of OCI GPU?", + tools=[ + { + "type": "file_search", + "vector_store_ids": [VECTOR_STORE_ID], + } + ], + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/function_calling.py b/examples/agenthub/openai/tools/function_calling.py new file mode 100644 index 0000000..22d50d9 --- /dev/null +++ b/examples/agenthub/openai/tools/function_calling.py @@ -0,0 +1,84 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates function calling tools in AgentHub.""" + +import json + +from openai.types.responses import ResponseFunctionToolCall +from openai.types.responses.response_input_param import FunctionCallOutput + +from examples.agenthub.openai import common + +model = "xai.grok-4-1-fast-reasoning" + + +# Define local functions +def get_current_weather(location: str) -> dict: + """Mock weather function.""" + return { + "location": location, + "temperature": "72", + "unit": "fahrenheit", + "forecast": ["sunny", "windy"], + } + + +def main(): + client = common.build_agenthub_client() + + # Define function tool schema + function_tools = [ + { + "type": "function", + "name": "get_current_weather", + "description": "Get current weather for a given location.", + "strict": True, + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and country e.g. 
Bogota, Colombia", + } + }, + "required": ["location"], + "additionalProperties": False, + }, + } + ] + + # First API request - model decides to call the function + response = client.responses.create( + model=model, + input="What is the weather in Seattle?", + tools=function_tools, + ) + print("First response:", response.output) + + # If the model requested a function call, execute it and send the result back + if isinstance(response.output[0], ResponseFunctionToolCall): + tool_call = response.output[0] + function_args = json.loads(tool_call.arguments) + + # Execute the local function + result = get_current_weather(**function_args) + + # Second API request - send the function output back to the model + response = client.responses.create( + model=model, + input=[ + FunctionCallOutput( + type="function_call_output", + call_id=tool_call.call_id, + output=json.dumps(result), + ) + ], + previous_response_id=response.id, + tools=function_tools, + ) + print("Final response:", response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/image_generation.py b/examples/agenthub/openai/tools/image_generation.py new file mode 100644 index 0000000..49fa1c3 --- /dev/null +++ b/examples/agenthub/openai/tools/image_generation.py @@ -0,0 +1,38 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates image generation tooling in AgentHub.""" + +import base64 + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="openai.gpt-5.2", + input="Generate an image of gray tabby cat hugging an otter with an orange scarf", + tools=[{"type": "image_generation"}], + store=False, + stream=False, + ) + + # Save the generated image to a file + image_data = [ + output.result for output in response.output if output.type == "image_generation_call" + ] + + if image_data: + image_base64 = image_data[0] + with open("generated_image.png", "wb") as f: + f.write(base64.b64decode(image_base64)) + print("Image saved to generated_image.png") + else: + print("No image was generated.") + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/multiple_tools.py b/examples/agenthub/openai/tools/multiple_tools.py new file mode 100644 index 0000000..869b459 --- /dev/null +++ b/examples/agenthub/openai/tools/multiple_tools.py @@ -0,0 +1,47 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates using multiple tools in a single request.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response_stream = client.responses.create( + model="openai.gpt-5.1", + tools=[ + {"type": "web_search"}, + { + "type": "mcp", + "server_label": "gitmcp", + "server_url": "https://gitmcp.io/openai/tiktoken", + "allowed_tools": [ + "search_tiktoken_documentation", + "fetch_tiktoken_documentation", + ], + "require_approval": "never", + }, + { + "type": "mcp", + "server_label": "dmcp", + "server_description": "A Dungeons and Dragons MCP server to" + " assist with dice rolling.", + "server_url": "https://mcp.deepwiki.com/mcp", + "require_approval": "never", + }, + ], + input="What are top news in Seattle today? How does tiktoken work? Roll 2d4+1.", + stream=True, + ) + + for event in response_stream: + if event.type == "response.output_text.delta": + print(event.delta, end="", flush=True) + + print() + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/remote_mcp.py b/examples/agenthub/openai/tools/remote_mcp.py new file mode 100644 index 0000000..0b5bd66 --- /dev/null +++ b/examples/agenthub/openai/tools/remote_mcp.py @@ -0,0 +1,36 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates calling a remote MCP tool.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response_stream = client.responses.create( + model="xai.grok-4-1-fast-reasoning", + tools=[ + { + "type": "mcp", + "server_label": "dmcp", + "server_description": "A Dungeons and Dragons MCP server to " + "assist with dice rolling.", + "server_url": "https://mcp.deepwiki.com/mcp", + "require_approval": "never", + }, + ], + input="Roll 2d4+1", + stream=True, + ) + + for event in response_stream: + if event.type == "response.output_text.delta": + print(event.delta, end="", flush=True) + + print() + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/remote_mcp_approval_flow.py b/examples/agenthub/openai/tools/remote_mcp_approval_flow.py new file mode 100644 index 0000000..2d2a5d8 --- /dev/null +++ b/examples/agenthub/openai/tools/remote_mcp_approval_flow.py @@ -0,0 +1,60 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates an approval flow for remote MCP tools."""
+
+from examples.agenthub.openai import common
+
+
+def main():
+    client = common.build_agenthub_client()
+
+    # First API request - Ask the model to call the MCP server,
+    # which requires your approval to execute the tool call
+    response1 = client.responses.create(
+        model="xai.grok-4-1-fast-reasoning",
+        tools=[
+            {
+                "type": "mcp",
+                "server_label": "deepwiki",
+                "server_url": "https://mcp.deepwiki.com/mcp",
+                "require_approval": "always",
+            },
+        ],
+        input="please tell me structure about facebook/react",
+    )
+    print(response1.output)
+
+    # Find the MCP approval request in the response
+    approval_request = next(
+        (item for item in response1.output if item.type == "mcp_approval_request"), None
+    )
+    if not approval_request:
+        raise ValueError("No MCP approval request found in response")
+
+    # Build your MCP approval response
+    approval_response = {
+        "type": "mcp_approval_response",
+        "approval_request_id": approval_request.id,
+        "approve": True,
+    }
+
+    # Second API request - Send the MCP approval response back to the model
+    response2 = client.responses.create(
+        model="xai.grok-4-1-fast-reasoning",
+        input=[approval_response],
+        tools=[
+            {
+                "type": "mcp",
+                "server_label": "deepwiki",  # this must match the server_label
+                "server_url": "https://mcp.deepwiki.com/mcp",
+                "require_approval": "always",
+            }
+        ],
+        previous_response_id=response1.id,
+    )
+    print(response2.output)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/agenthub/openai/tools/web_search.py b/examples/agenthub/openai/tools/web_search.py
new file mode 100644
index 0000000..dcd2fb8
--- /dev/null
+++ b/examples/agenthub/openai/tools/web_search.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates the web_search tool in AgentHub.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response = client.responses.create( + model="openai.gpt-5.1", + tools=[{"type": "web_search"}], + input="What was a positive news story on 2025-11-14?", + ) + print(response.output_text) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/tools/web_search_streaming.py b/examples/agenthub/openai/tools/web_search_streaming.py new file mode 100644 index 0000000..8234a08 --- /dev/null +++ b/examples/agenthub/openai/tools/web_search_streaming.py @@ -0,0 +1,24 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates streaming results from the web_search tool.""" + +from examples.agenthub.openai import common + + +def main(): + client = common.build_agenthub_client() + + response_stream = client.responses.create( + model="openai.gpt-5.1", + tools=[{"type": "web_search"}], + input="What was a positive news story on 2026-03-06?", + stream=True, + ) + + for event in response_stream: + print(event) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/vector_stores/__init__.py b/examples/agenthub/openai/vector_stores/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/agenthub/openai/vector_stores/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/agenthub/openai/vector_stores/vector_store_file_batches_crud.py b/examples/agenthub/openai/vector_stores/vector_store_file_batches_crud.py new file mode 100644 index 0000000..976dba4 --- /dev/null +++ b/examples/agenthub/openai/vector_stores/vector_store_file_batches_crud.py @@ -0,0 +1,46 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates CRUD operations for vector store file batches.""" + +from examples.agenthub.openai import common + +VECTOR_STORE_ID = "<>" + + +def main(): + client = common.build_agenthub_client() + with open("../files/sample_doc.pdf", "rb") as f1, open("../files/sample_doc.pdf", "rb") as f2: + file_1 = client.files.create( + file=f1, + purpose="user_data", + ) + file_2 = client.files.create( + file=f2, + purpose="user_data", + ) + # Create a batch with file IDs and shared attributes + batch_result = client.vector_stores.file_batches.create( + vector_store_id=VECTOR_STORE_ID, + file_ids=[file_1.id, file_2.id], + attributes={"category": "history"}, + ) + print("Created batch:", batch_result) + + # Retrieve batch status + retrieve_result = client.vector_stores.file_batches.retrieve( + vector_store_id=VECTOR_STORE_ID, + batch_id=batch_result.id, + ) + print("\nBatch status:", retrieve_result) + + # List files in a batch + list_result = client.vector_stores.file_batches.list_files( + vector_store_id=VECTOR_STORE_ID, + batch_id=batch_result.id, + ) + print("\nBatch files:", list_result) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/vector_stores/vector_store_files_crud.py b/examples/agenthub/openai/vector_stores/vector_store_files_crud.py new file mode 100644 index 0000000..0a39177 --- /dev/null +++ b/examples/agenthub/openai/vector_stores/vector_store_files_crud.py 
@@ -0,0 +1,60 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates CRUD operations for vector store files.""" + +from examples.agenthub.openai import common + +VECTOR_STORE_ID = "<>" + + +def main(): + client = common.build_agenthub_client() + # Create the file + with open("../files/sample_doc.pdf", "rb") as f: + file = client.files.create( + file=f, + purpose="user_data", + ) + print(f"Created a File: {file.id}, now waiting for it to get processed") + client.files.wait_for_processing(file.id) + + # Add a file to a vector store + create_result = client.vector_stores.files.create( + vector_store_id=VECTOR_STORE_ID, + file_id=file.id, + attributes={"category": "history"}, + ) + print("\nCreated vector store file:", create_result) + + # List vector store files + list_result = client.vector_stores.files.list( + vector_store_id=VECTOR_STORE_ID, + ) + print("\nFiles:", list_result) + + # Retrieve vector store file + retrieve_result = client.vector_stores.files.retrieve( + vector_store_id=VECTOR_STORE_ID, + file_id=file.id, + ) + print("\nRetrieved:", retrieve_result) + + # Update vector store file attributes + update_result = client.vector_stores.files.update( + vector_store_id=VECTOR_STORE_ID, + file_id=file.id, + attributes={"category": "history", "period": "medieval"}, + ) + print("\nUpdated:", update_result) + + # Delete vector store file (removes parsed content, not the original file) + delete_result = client.vector_stores.files.delete( + vector_store_id=VECTOR_STORE_ID, + file_id=file.id, + ) + print("\nDeleted:", delete_result) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/vector_stores/vector_stores_crud.py b/examples/agenthub/openai/vector_stores/vector_stores_crud.py new file mode 100644 index 0000000..9f1343e --- /dev/null +++ b/examples/agenthub/openai/vector_stores/vector_stores_crud.py @@ -0,0 +1,51 
@@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Vector Stores API examples - create, list, retrieve, update, search, and delete.""" + +from examples.agenthub.openai import common + + +def main(): + cp_client = common.build_agenthub_cp_client() + # Create a vector store + vector_store = cp_client.vector_stores.create( + name="OCI Support FAQ Vector Store", + description="My vector store for supporting customer queries", + expires_after={ + "anchor": "last_active_at", + "days": 30, + }, + metadata={ + "topic": "oci", + }, + ) + print("Created vector store:", vector_store.id) + + # List vector stores + list_result = cp_client.vector_stores.list(limit=20, order="desc") + print("\nVector stores:", list_result) + + # Retrieve vector store + retrieve_result = cp_client.vector_stores.retrieve( + vector_store_id=vector_store.id, + ) + print("\nRetrieved:", retrieve_result) + + # Update vector store + update_result = cp_client.vector_stores.update( + vector_store_id=vector_store.id, + name="Updated Demo Vector Store", + metadata={"category": "history", "period": "medieval"}, + ) + print("\nUpdated:", update_result) + + # Delete vector store + delete_result = cp_client.vector_stores.delete( + vector_store_id=vector_store.id, + ) + print("\nDeleted:", delete_result) + + +if __name__ == "__main__": + main() diff --git a/examples/agenthub/openai/vector_stores/vector_stores_search.py b/examples/agenthub/openai/vector_stores/vector_stores_search.py new file mode 100644 index 0000000..19f548a --- /dev/null +++ b/examples/agenthub/openai/vector_stores/vector_stores_search.py @@ -0,0 +1,81 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates searching vector stores.""" + +import time + +from examples.agenthub.openai import common + + +def main(): + dp_client = common.build_agenthub_client() + with open("../files/sample_doc.pdf", "rb") as f: + # Upload a file using the Files API + file = dp_client.files.create( + file=f, + purpose="user_data", + ) + print(f"Uploaded file:{file.id}, waiting for it to be processed") + # dp_client.files.wait_for_processing(file.id) + + cp_client = common.build_agenthub_cp_client() + # Create a vector store + vector_store = cp_client.vector_stores.create( + name="OCI Support FAQ Vector Store", + description="My vector store for supporting customer queries", + expires_after={ + "anchor": "last_active_at", + "days": 30, + }, + ) + print("Created vector store:", vector_store.id) + + # Wait for vector store resource to be in the "completed" state + while True: + vector_store = cp_client.vector_stores.retrieve(vector_store_id=vector_store.id) + print("Vector store status:", vector_store.status) + if vector_store.status == "completed": + break + else: + time.sleep(5) + + # Wait a few more seconds after completed state for the vector store to be fully activated + time.sleep(10) + + # Add a file to a vector store using the Vector Store Files API + create_result = dp_client.vector_stores.files.create( + vector_store_id=vector_store.id, + file_id=file.id, + attributes={"category": "docfiles"}, + ) + print("Created vector store file:", create_result) + + while True: + file_status = dp_client.vector_stores.files.retrieve( + vector_store_id=vector_store.id, + file_id=file.id, + ) + print("Vector store file status:", file_status.status) + if file_status.status == "completed": + break + else: + time.sleep(3) + + # Now the vector store file is indexed, we can search the vector store by a query term + search_results_page = dp_client.vector_stores.search( + 
vector_store_id=vector_store.id, + query="OCI GenAI Auth", + max_num_results=10, + ) + print("\nSearch results page:", search_results_page) + + if search_results_page.data: + for page_data in search_results_page.data: + print("\nSearch results page data:", page_data) + else: + print("\nNo search results found") + + +if __name__ == "__main__": + main() diff --git a/examples/fc_tools.py b/examples/fc_tools.py new file mode 100644 index 0000000..ea9966b --- /dev/null +++ b/examples/fc_tools.py @@ -0,0 +1,121 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates the Fc tools example.""" + +from openai.types.responses.function_tool_param import FunctionToolParam + + +def get_stock_price(symbol: str) -> dict: + """Get the current stock price for a given symbol.""" + print(f"Fetching stock price for {symbol}...") + + # Mock stock data + return { + "symbol": symbol.upper(), + "price": 175.42 if symbol.upper() == "AAPL" else 198.76, + "currency": "USD", + "change": 2.34 if symbol.upper() == "AAPL" else -1.23, + "change_percent": 1.35 if symbol.upper() == "AAPL" else -0.62, + "last_updated": "2025-06-26T15:00:00Z", + } + + +def get_current_weather(location: str, unit: str = "fahrenheit") -> dict: + """Get current weather for a given location. 
+ + Args: + location: The city and state, e.g., San Francisco, CA + unit: The unit of temperature (celsius or fahrenheit) + + Returns: + dict: Weather information + """ + # In a real application, you would call a weather API here + # This is a mock implementation for demonstration + print(f"Fetching weather for {location} in {unit}...") + + # Mock weather data + weather_data = { + "location": location, + "temperature": "72", + "unit": "fahrenheit", + "forecast": ["sunny", "windy"], + "humidity": "65%", + "description": "Sunny with a gentle breeze", + } + + return weather_data + + +def recommend_clothing(temperature, unit="fahrenheit"): + """ + Returns clothing recommendations based on input temperature and unit. + + Parameters: + temperature (float or int): The temperature value to base the recommendation on. + unit (str, optional): The unit of the temperature value. + Can be 'fahrenheit' or 'celsius'. Defaults to 'fahrenheit'. + """ + # Convert to Fahrenheit if input is in Celsius + if unit.lower() == "celsius": + temperature = temperature * 9 / 5 + 32 + + if temperature >= 80: + return "It's hot! Wear shorts and a t-shirt." + elif temperature >= 65: + return "It's warm. A short-sleeve shirt and pants are fine." + elif temperature >= 50: + return "It's a bit chilly. Wear a light jacket or sweater." + elif temperature >= 32: + return "It's cold. Wear a coat, sweater, and possibly a hat." + else: + return "It's freezing! Dress warmly in layers, including a winter coat, gloves, and a hat." + + +fc_tools = [ + FunctionToolParam( + name="get_current_weather", + strict=True, + parameters={ + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and country e.g. 
Bogotá, Colombia", + } + }, + "required": ["location"], + "additionalProperties": False, + }, + type="function", + description="Get current weather for a given location.", + ), + FunctionToolParam( + name="recommend_clothing", + strict=True, + parameters={ + "type": "object", + "properties": { + "temperature": { + "type": "integer", + "description": "The temperature value to base the recommendation on.", + } + }, + "required": ["temperature"], + "additionalProperties": False, + }, + type="function", + description="Returns clothing recommendations based on input temperature and unit.", + ), +] + + +def execute_function_call(function_name, function_args): + # Call the function + if function_name == "get_current_weather": + return get_current_weather(**function_args) + elif function_name == "recommend_clothing": + return recommend_clothing(**function_args) + else: + return None diff --git a/examples/partner/__init__.py b/examples/partner/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/partner/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/partner/openai/__init__.py b/examples/partner/openai/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/examples/partner/openai/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/examples/partner/openai/basic_chat_completion.py b/examples/partner/openai/basic_chat_completion.py new file mode 100644 index 0000000..da0e4a4 --- /dev/null +++ b/examples/partner/openai/basic_chat_completion.py @@ -0,0 +1,29 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates a basic chat completion request for the Partner (pass-through) endpoint.""" + +from rich import print + +from examples.partner.openai import common + +MODEL = "openai.gpt-4.1" + + +def main(): + openai_client = common.build_openai_pt_client() + + completion = openai_client.chat.completions.create( + model=MODEL, + messages=[ + {"role": "system", "content": "You are a concise assistant."}, + {"role": "user", "content": "List three creative uses for a paperclip."}, + ], + max_tokens=128, + ) + + print(completion.model_dump_json(indent=2)) + + +if __name__ == "__main__": + main() diff --git a/examples/partner/openai/basic_chat_completion_api_key.py b/examples/partner/openai/basic_chat_completion_api_key.py new file mode 100644 index 0000000..02f6fff --- /dev/null +++ b/examples/partner/openai/basic_chat_completion_api_key.py @@ -0,0 +1,37 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Demonstrates the Basic chat completion api key example.""" + +import os + +from openai import OpenAI + +MODEL = "openai.gpt-4.1" + + +def main() -> None: + client = OpenAI( + api_key=os.getenv("OPENAI_API_KEY"), + base_url="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com/openai/v1", + ) + + response = client.chat.completions.create( + model=MODEL, + messages=[ + { + "role": "system", + "content": "You are a concise assistant who answers in one paragraph.", + }, + { + "role": "user", + "content": "Explain why the sky is blue as if you were a physics teacher.", + }, + ], + ) + + print(response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/examples/partner/openai/common.py b/examples/partner/openai/common.py new file mode 100644 index 0000000..874b325 --- /dev/null +++ b/examples/partner/openai/common.py @@ -0,0 +1,39 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +"""Partner (pass-through) example clients and configuration.""" + +from __future__ import annotations + +import httpx +from openai import AsyncOpenAI, OpenAI + +from oci_genai_auth import OciSessionAuth + +PROFILE_NAME = "DEFAULT" +COMPARTMENT_ID = "<>" +REGION = "us-chicago-1" + +PARTNER_OPENAI_BASE_URL = f"https://inference.generativeai.{REGION}.oci.oraclecloud.com/v1" + + +def build_openai_client() -> OpenAI: + client_kwargs = { + "api_key": "not-used", + "base_url": PARTNER_OPENAI_BASE_URL, + "http_client": httpx.Client(auth=OciSessionAuth(profile_name=PROFILE_NAME)), + } + if COMPARTMENT_ID: + client_kwargs["default_headers"] = {"opc-compartment-id": COMPARTMENT_ID} + return OpenAI(**client_kwargs) + + +def build_openai_async_client() -> AsyncOpenAI: + client_kwargs = { + "api_key": "not-used", + "base_url": PARTNER_OPENAI_BASE_URL, + "http_client": httpx.AsyncClient(auth=OciSessionAuth(profile_name=PROFILE_NAME)), + } + if COMPARTMENT_ID: + client_kwargs["default_headers"] = {"opc-compartment-id": COMPARTMENT_ID} + return AsyncOpenAI(**client_kwargs) diff --git a/examples/partner/openai/quickstart_openai_chat_completions.py b/examples/partner/openai/quickstart_openai_chat_completions.py new file mode 100755 index 0000000..8da3e94 --- /dev/null +++ b/examples/partner/openai/quickstart_openai_chat_completions.py @@ -0,0 +1,46 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates a simple openai chat completions example."""
+
+import logging
+
+from examples.partner.openai import common
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+def main():
+    client = common.build_openai_client()  # common.py defines build_openai_client (was build_openai_pt_client)
+    model = "meta.llama-4-scout-17b-16e-instruct"
+
+    completion = client.chat.completions.create(
+        model="openai.gpt-4.1",
+        messages=[
+            {
+                "role": "user",
+                "content": "How do I output all files in a directory using Python?",
+            },
+        ],
+    )
+    print(completion.model_dump_json())
+
+    # Process the stream
+    print("=" * 80)
+    print("Process in streaming mode")
+    streaming = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "user",
+                "content": "How do I output all files in a directory using Python?",
+            },
+        ],
+        stream=True,
+    )
+    for chunk in streaming:
+        print(chunk)
+
+
+if __name__ == "__main__":
+    main() diff --git a/examples/partner/openai/streaming_chat_completion.py b/examples/partner/openai/streaming_chat_completion.py new file mode 100644 index 0000000..8a3065b --- /dev/null +++ b/examples/partner/openai/streaming_chat_completion.py @@ -0,0 +1,38 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates streaming chat completion responses for the Partner (pass-through) endpoint."""
+
+from examples.partner.openai import common
+
+MODEL = "openai.gpt-4.1"
+
+
+def main():
+    openai_client = common.build_openai_client()  # common.py defines build_openai_client (was build_openai_pt_client)
+
+    stream = openai_client.chat.completions.create(
+        model=MODEL,
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a concise assistant who answers in one paragraph.",
+            },
+            {
+                "role": "user",
+                "content": "Explain why the sky is blue as if you were a physics teacher.",
+            },
+        ],
+        stream=True,
+    )
+
+    for chunk in stream:
+        for choice in chunk.choices:
+            delta = choice.delta
+            if delta.content:
+                print(delta.content, end="", flush=True)
+    print()
+
+
+if __name__ == "__main__":
+    main() diff --git a/examples/partner/openai/tool_call_chat_completion.py b/examples/partner/openai/tool_call_chat_completion.py new file mode 100644 index 0000000..fab4cdb --- /dev/null +++ b/examples/partner/openai/tool_call_chat_completion.py @@ -0,0 +1,94 @@ +# Copyright (c) 2026 Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+"""Demonstrates tool calling with chat completions for the Partner (pass-through) endpoint."""
+
+import json
+from typing import Dict
+
+from examples.partner.openai import common
+
+MODEL = "openai.gpt-4.1"
+
+
+def get_current_weather(location: str, unit: str = "fahrenheit") -> Dict[str, str]:
+    # Simple stand-in for a real weather lookup. 
+    return {
+        "location": location,
+        "temperature": "72",
+        "unit": unit,
+        "forecast": ["sunny", "windy"],
+    }
+
+
+def main():
+    openai_client = common.build_openai_client()  # common.py defines build_openai_client (was build_openai_pt_client)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "What is the weather like in Boston and San Francisco?",
+        }
+    ]
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_current_weather",
+                "description": "Get the current weather for a specific location.",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "City and state, for example Boston, MA.",
+                        },
+                        "unit": {
+                            "type": "string",
+                            "enum": ["celsius", "fahrenheit"],
+                            "description": "Temperature unit to use in the response.",
+                        },
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+
+    first_response = openai_client.chat.completions.create(
+        model=MODEL,
+        messages=messages,
+        tools=tools,
+        tool_choice="auto",
+    )
+    first_choice = first_response.choices[0]
+
+    if first_choice.finish_reason == "tool_calls":
+        call_message = first_choice.message
+        new_messages = messages + [call_message]
+        for tool_call in call_message.tool_calls:
+            args = json.loads(tool_call.function.arguments)
+            tool_result = get_current_weather(
+                location=args.get("location", ""),
+                unit=args.get("unit", "fahrenheit"),
+            )
+            new_messages.append(
+                {
+                    "role": "tool",
+                    "name": tool_call.function.name,
+                    "tool_call_id": tool_call.id,
+                    "content": json.dumps(tool_result),
+                }
+            )
+
+        follow_up = openai_client.chat.completions.create(
+            model=MODEL,
+            messages=new_messages,
+        )
+        print(follow_up.choices[0].message.content)
+    else:
+        print(first_choice.message.content)
+
+
+if __name__ == "__main__":
+    main() diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..4c9a26c --- /dev/null +++ b/mypy.ini @@ -0,0 +1,6 @@ +[mypy]
+ignore_missing_imports = True
+no_strict_optional = True
+
+# Suppress specific error codes that are false positives due to complex union types 
+disable_error_code = union-attr,index,import-untyped,arg-type diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0a1021f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,116 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "oci-genai-auth" +dynamic = ["version"] +description = 'OCI authentication and authorization utilities for Generative AI SDKs' +readme = "README.md" +requires-python = ">=3.9" +license = "UPL-1.0" +license-files = ["LICENSE.txt"] +keywords = [ + "Oracle", + "Oracle Cloud Infrastructure", + "OCI", + "Artificial Intelligence", + "AI", + "GenAI", + "Generative AI", +] +authors = [{ name = "Chao Yang", email = "chao.c.yang@oracle.com" }] +classifiers = [ + "Intended Audience :: Developers", + "Programming Language :: Python", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: System :: Benchmark", + "Typing :: Typed", +] +dependencies = [ + "httpx>=0.23.0, <1", + "oci>=2.150.1", + "requests>=2.32.1,<3.0.0", +] + +[project.optional-dependencies] +dev = [ + "ruff>=0.5.6,<0.6.0", + "mypy>=1.11.1,<2.0.0", + "black>=24.8.0,<25.0.0", + "isort>=5.13.2,<6.0.0", + "pytest>=8.3.2,<9.0.0", + "pytest-cov>=5.0.0,<6.0.0", + "respx", + "typing-extensions>=4.11, <5", + "openai>=1.108.1", + "openai-agents>=0.5.1", + "pytest-mock", + "ag2[openai]", + "pytest-httpx", + "pytest-asyncio", + "eval_type_backport", + "rich>=13.0" +] + +[project.urls] +Documentation = "https://github.com/oracle-samples/oci-genai-auth#readme" +Issues = "https://github.com/oracle-samples/oci-genai-auth/issues" +Source = "https://github.com/oracle-samples/oci-genai-auth" + +[tool.hatch.version] +path = "src/oci_genai_auth/__about__.py" + +[tool.hatch.envs.types] +extra-dependencies = [ + "mypy>=1.0.0", +] 
+[tool.hatch.envs.types.scripts] +check = "mypy --install-types --non-interactive {args:src/oci_genai_auth tests}" + +[tool.coverage.run] +source_pkgs = ["oci_genai_auth", "tests"] +branch = true +parallel = true +omit = [ + "src/oci_genai_auth/__about__.py", +] + +[tool.coverage.paths] +oci_genai_auth = ["src/oci_genai_auth", "*/oci-genai-auth/src/oci_genai_auth"] +tests = ["tests", "*/oci-genai-auth/tests"] + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.ruff] +target-version = "py39" +line-length = 100 +extend-exclude = [ +] + +[tool.ruff.lint] +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + "G", +] + +[tool.black] +line-length = 100 +target-version = ['py39'] +exclude = '\.venv|build|dist' diff --git a/src/oci_genai_auth/__about__.py b/src/oci_genai_auth/__about__.py new file mode 100644 index 0000000..1a04f1e --- /dev/null +++ b/src/oci_genai_auth/__about__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +__version__ = "1.1.0" diff --git a/src/oci_genai_auth/__init__.py b/src/oci_genai_auth/__init__.py new file mode 100644 index 0000000..1d1716e --- /dev/null +++ b/src/oci_genai_auth/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +from __future__ import annotations + +from .auth import ( + HttpxOciAuth, + OciInstancePrincipalAuth, + OciResourcePrincipalAuth, + OciSessionAuth, + OciUserPrincipalAuth, +) + +__all__ = [ + "HttpxOciAuth", + "OciSessionAuth", + "OciResourcePrincipalAuth", + "OciInstancePrincipalAuth", + "OciUserPrincipalAuth", +] diff --git a/src/oci_genai_auth/auth.py b/src/oci_genai_auth/auth.py new file mode 100644 index 0000000..b19475b --- /dev/null +++ b/src/oci_genai_auth/auth.py @@ -0,0 +1,375 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +from __future__ import annotations + +import logging +import threading +import time +from abc import ABC, abstractmethod +from typing import Any, Generator, Mapping, Optional + +import httpx +import oci +import requests +from oci.config import DEFAULT_LOCATION, DEFAULT_PROFILE +from typing_extensions import TypeAlias, override + +logger = logging.getLogger(__name__) + +OciAuthSigner: TypeAlias = oci.signer.AbstractBaseSigner + + +class HttpxOciAuth(httpx.Auth, ABC): + """ + Enhanced custom HTTPX authentication class that implements OCI request signing + with auto-refresh. + + This class handles the authentication flow for HTTPX requests by signing them + using the OCI Signer, which adds the necessary authentication headers for OCI API calls. + It also provides automatic token refresh functionality for token-based authentication methods. 
+ Attributes: + signer (oci.signer.Signer): The OCI signer instance used for request signing + refresh_interval: Seconds between token refreshes (default: 3600 - 1 hour) + _lock: Threading lock for thread-safe token refresh + _last_refresh: Last refresh timestamp + """ + + def __init__(self, signer: OciAuthSigner, refresh_interval: int = 3600): + """ + Initialize the authentication with a signer and refresh configuration. + Args: + signer: OCI signer instance + refresh_interval: Seconds between token refreshes (default: 3600 - 1 hour) + """ + self.signer = signer + self.refresh_interval = refresh_interval + self._lock = threading.Lock() + self._last_refresh: Optional[float] = time.time() + logger.info( + "Initialized %s with refresh interval: %d seconds", + self.__class__.__name__, + refresh_interval, + ) + + def _should_refresh_token(self) -> bool: + """ + Check if the token should be refreshed based on time interval. + Returns: + bool: True if token should be refreshed, False otherwise + """ + current_time = time.time() + return (current_time - self._last_refresh) >= self.refresh_interval + + @abstractmethod + def _refresh_signer(self) -> None: + """ + Abstract method to refresh the signer. Must be implemented by subclasses. + This method should create a new signer instance with fresh credentials/tokens. + """ + pass + + def _refresh_if_needed(self) -> OciAuthSigner: + """ + Refresh the signer if enough time has passed since last refresh. + This method is thread-safe and will only refresh once per interval. 
+ """ + with self._lock: + if self._should_refresh_token(): + logger.info("Time interval reached, refreshing %s ...", self.__class__.__name__) + try: + self._refresh_signer() + self._last_refresh = time.time() + logger.info("%s token refresh completed successfully", self.__class__.__name__) + except Exception: + logger.exception("Token refresh failed") + return self.signer + + def _sign_request(self, request: httpx.Request, content: bytes, signer: OciAuthSigner) -> None: + """ + Sign the given HTTPX request with the OCI signer using the provided content. + Updates request.headers in place with the signed headers. + """ + # Strip any SDK auth headers to avoid conflicting with OCI signing. + request.headers.pop("Authorization", None) + request.headers.pop("X-Api-Key", None) + req = requests.Request( + method=request.method, + url=str(request.url), + headers=dict(request.headers), + data=content, + ) + prepared_request = req.prepare() + signer.do_request_sign(prepared_request) # type: ignore + request.headers.update(prepared_request.headers) + + @override + def auth_flow(self, request: httpx.Request) -> Generator[httpx.Request, httpx.Response, None]: + """ + Authentication flow for HTTPX requests with automatic retry on 401 errors. + This method: + 1. Checks if token needs refresh and refreshes if necessary + 2. Signs the request using OCI signer + 3. Yields the signed request + 4. 
If 401 error is received, attempts token refresh and retries once + Args: + request: The HTTPX request to be authenticated + Yields: + httpx.Request: The authenticated request + """ + # Check and refresh token if needed + signer = self._refresh_if_needed() + + # Read the request content to handle streaming requests properly + try: + content = request.content + except httpx.RequestNotRead: + # For streaming requests, we need to read the content first + content = request.read() + + self._sign_request(request, content, signer) + + response = yield request + + # If we get a 401 (Unauthorized), try refreshing the token once and retry + if response.status_code == 401: + logger.info("Received 401 Unauthorized, attempting token refresh and retry...") + with self._lock: + try: + self._refresh_signer() + self._last_refresh = time.time() + signer = self.signer + self._sign_request(request, content, signer) + yield request + except Exception: + logger.exception("Token refresh on 401 failed") + + +class OciSessionAuth(HttpxOciAuth): + """ + OCI authentication implementation using session-based authentication. + + This class implements OCI authentication using a session token and private key + loaded from the OCI configuration file. It's suitable for interactive user sessions. + + Attributes: + signer (oci.auth.signers.SecurityTokenSigner): OCI signer using session token + """ + + def __init__( + self, + config_file: str = DEFAULT_LOCATION, + profile_name: str = DEFAULT_PROFILE, + refresh_interval: int = 3600, + **kwargs: Any, + ): + """ + Initialize a Security Token-based OCI signer. + + Parameters + ---------- + config_file : str, optional + Path to the OCI configuration file. Defaults to `~/.oci/config`. + profile_name : str, optional + Profile name inside the OCI configuration file to use. + Defaults to "DEFAULT". 
+ refresh_interval: int, optional + Seconds between token refreshes (default: 3600 - 1 hour) + **kwargs : Mapping[str, Any] + Optional keyword arguments: + - `generic_headers`: Optional[Dict[str, str]] + Headers to be used for generic requests. + Default: `["date", "(request-target)", "host"]` + - `body_headers`: Optional[Dict[str, str]] + Headers to be used for signed request bodies. + Default: `["content-length", "content-type", "x-content-sha256"]` + + Raises + ------ + oci.exceptions.ConfigFileNotFound + If the configuration file cannot be found. + KeyError + If a required key such as `"key_file"` is missing in the config. + Exception + For any other initialization errors. + """ + # Load OCI configuration and token + self.config_file = config_file + self.profile_name = profile_name + config = oci.config.from_file(config_file, profile_name) + token = self._load_token(config) + + # Load the private key from config + key_path = config.get("key_file") + if not key_path: + raise KeyError(f"Missing 'key_file' entry in OCI config profile '{profile_name}'.") + private_key = self._load_private_key(config) + + # Optional signer header customization + generic_headers = kwargs.pop("generic_headers", None) + body_headers = kwargs.pop("body_headers", None) + + additional_kwargs = {} + if generic_headers: + additional_kwargs["generic_headers"] = generic_headers + if body_headers: + additional_kwargs["body_headers"] = body_headers + + self.additional_kwargs = additional_kwargs + signer = oci.auth.signers.SecurityTokenSigner(token, private_key, **self.additional_kwargs) + super().__init__(signer=signer, refresh_interval=refresh_interval) + + def _load_token(self, config: Mapping[str, Any]) -> str: + """ + Load session token from file specified in configuration. 
+ Args: + config: OCI configuration dictionary + Returns: + str: Session token content + """ + token_file = config["security_token_file"] + with open(token_file, "r") as f: + return f.read().strip() + + def _load_private_key(self, config: Any) -> str: + """ + Load private key from file specified in configuration. + Args: + config: OCI configuration dictionary + Returns: + Private key object + """ + return oci.signer.load_private_key_from_file(config["key_file"]) + + def _refresh_signer(self) -> None: + """ + Refresh the session signer by reloading token and private key. + This method creates a new SecurityTokenSigner with fresh credentials + loaded from the configuration files. + """ + # Reload configuration in case it has changed + config = oci.config.from_file(self.config_file, self.profile_name) + token = self._load_token(config) + private_key = self._load_private_key(config) + self.signer = oci.auth.signers.SecurityTokenSigner( + token, private_key, **self.additional_kwargs + ) + + +class OciResourcePrincipalAuth(HttpxOciAuth): + """ + OCI authentication implementation using Resource Principal authentication with auto-refresh. + + This class implements OCI authentication using Resource Principal credentials, + which is suitable for services running within OCI (like Functions, Container Instances) + that need to access other OCI services. The resource principal token is automatically + refreshed at specified intervals. + """ + + def __init__(self, refresh_interval: int = 3600, **kwargs: Any) -> None: + """ + Initialize resource principal authentication. + Args: + refresh_interval: Seconds between token refreshes (default: 3600 - 1 hour) + **kwargs: Additional arguments passed to the resource principal signer + """ + self.kwargs = kwargs + signer = oci.auth.signers.get_resource_principals_signer(**kwargs) + super().__init__(signer=signer, refresh_interval=refresh_interval) + + def _refresh_signer(self) -> None: + """ + Refresh the resource principal signer. 
+ This method creates a new resource principal signer which will + automatically fetch fresh credentials from the OCI metadata service. + """ + self.signer = oci.auth.signers.get_resource_principals_signer(**self.kwargs) + + +class OciInstancePrincipalAuth(HttpxOciAuth): + """ + OCI authentication implementation using Instance Principal authentication with auto-refresh. + + This class implements OCI authentication using Instance Principal credentials, + which is suitable for compute instances that need to access OCI services. + The instance principal token is automatically refreshed at specified intervals. + """ + + def __init__(self, refresh_interval: int = 3600, **kwargs) -> None: # noqa: ANN003 + """ + Initialize instance principal authentication. + Args: + refresh_interval: Seconds between token refreshes (default: 3600 - 1 hour) + **kwargs: Additional arguments passed to InstancePrincipalsSecurityTokenSigner + """ + self.kwargs = kwargs + signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner(**kwargs) + super().__init__(signer=signer, refresh_interval=refresh_interval) + + def _refresh_signer(self) -> None: + """ + Refresh the instance principal signer. + This method creates a new InstancePrincipalsSecurityTokenSigner which will + automatically fetch fresh credentials from the OCI metadata service. + """ + self.signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner(**self.kwargs) + + +class OciUserPrincipalAuth(HttpxOciAuth): + """ + OCI authentication implementation using user principal authentication with auto-refresh. + + This class implements OCI authentication using API Key credentials loaded from + the OCI configuration file. It's suitable for programmatic access to OCI services. + Since API key authentication doesn't use tokens that expire, this class doesn't + need frequent refresh but supports configuration reload at specified intervals. 
+ Attributes: + config_file (str): Path to OCI configuration file + profile_name (str): Profile name in the configuration file + config (dict): OCI configuration dictionary + """ + + def __init__( + self, + config_file: str = DEFAULT_LOCATION, + profile_name: str = DEFAULT_PROFILE, + refresh_interval: int = 3600, + ) -> None: + """ + Initialize user principal authentication. + Args: + config_file: Path to OCI config file (default: ~/.oci/config) + profile_name: Profile name to use (default: DEFAULT) + refresh_interval: Seconds between config reloads (default: 3600 - 1 hour) + """ + self.config_file = config_file + self.profile_name = profile_name + self.config = oci.config.from_file(config_file, profile_name) + oci.config.validate_config(self.config) + signer = oci.signer.Signer( + tenancy=self.config["tenancy"], + user=self.config["user"], + fingerprint=self.config["fingerprint"], + private_key_file_location=self.config.get("key_file"), + pass_phrase=oci.config.get_config_value_or_default(self.config, "pass_phrase"), + private_key_content=self.config.get("key_content"), + ) + super().__init__(signer=signer, refresh_interval=refresh_interval) + + def _refresh_signer(self) -> None: + """ + Refresh the user principal signer. + For API key authentication, this recreates the signer with the same credentials. + This is mainly useful if the configuration file has been updated. 
+ """ + # Reload configuration in case it has changed + self.config = oci.config.from_file(self.config_file, self.profile_name) + oci.config.validate_config(self.config) + self.signer = oci.signer.Signer( + tenancy=self.config["tenancy"], + user=self.config["user"], + fingerprint=self.config["fingerprint"], + private_key_file_location=self.config.get("key_file"), + pass_phrase=oci.config.get_config_value_or_default(self.config, "pass_phrase"), + private_key_content=self.config.get("key_content"), + ) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..b38e643 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..06fcc1e --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,19 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +import os + +import pytest + + +@pytest.fixture(autouse=True, scope="session") +def _disable_openai_agents_tracing(): + # Prevent OpenAI Agents tracing from emitting external HTTP requests during tests. + os.environ.setdefault("OPENAI_AGENTS_DISABLE_TRACING", "true") + try: + from agents.tracing import set_tracing_disabled + except (ImportError, ModuleNotFoundError): + yield + return + set_tracing_disabled(True) + yield diff --git a/tests/test_auth.py b/tests/test_auth.py new file mode 100644 index 0000000..f9e8784 --- /dev/null +++ b/tests/test_auth.py @@ -0,0 +1,163 @@ +# Copyright (c) 2026 Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +from __future__ import annotations + +from unittest.mock import patch + +import httpx + +from oci_genai_auth.auth import ( + HttpxOciAuth, + OciInstancePrincipalAuth, + OciResourcePrincipalAuth, + OciSessionAuth, + OciUserPrincipalAuth, +) + + +class _DummySigner: + def __init__(self, token: str) -> None: + self.token = token + + def do_request_sign(self, prepared_request) -> None: # noqa: ANN001 + prepared_request.headers["authorization"] = self.token + + +class _DummyAuth(HttpxOciAuth): + def __init__(self, signer: _DummySigner, refresh_interval: int = 3600) -> None: + self.refresh_calls = 0 + super().__init__(signer=signer, refresh_interval=refresh_interval) + + def _refresh_signer(self) -> None: + self.refresh_calls += 1 + self.signer = _DummySigner(f"signed-{self.refresh_calls}") + + +class _BrokenRefreshAuth(HttpxOciAuth): + def _refresh_signer(self) -> None: + raise ConnectionError("metadata service unreachable") + + +def test_auth_flow_signs_request(): + auth = _DummyAuth(_DummySigner("signed-0")) + request = httpx.Request( + "GET", + "https://example.com?foo=bar", + headers={ + "Authorization": "Bearer test", + "X-Api-Key": "api-key", + }, + ) + flow = auth.auth_flow(request) + signed_request = next(flow) + assert signed_request.headers["authorization"] == "signed-0" + assert "x-api-key" not in signed_request.headers + assert signed_request.url.params.get("foo") == "bar" + + +def test_auth_flow_refreshes_on_401(): + auth = _DummyAuth(_DummySigner("signed-0")) + request = httpx.Request("GET", "https://example.com") + flow = auth.auth_flow(request) + signed_request = next(flow) + response = httpx.Response(401, request=signed_request) + retry_request = flow.send(response) + assert auth.refresh_calls == 1 + assert retry_request.headers["authorization"] == "signed-1" + + +def test_refresh_if_needed_calls_refresh_signer(): + auth = 
_DummyAuth(_DummySigner("signed-0"), refresh_interval=0) + auth._refresh_if_needed() + assert auth.refresh_calls == 1 + + +def test_refresh_failure_does_not_break_auth_flow(caplog): + auth = _BrokenRefreshAuth(_DummySigner("signed-0"), refresh_interval=0) + request = httpx.Request("GET", "https://example.com") + + with caplog.at_level("ERROR"): + flow = auth.auth_flow(request) + signed_request = next(flow) + + assert signed_request.headers["authorization"] == "signed-0" + assert any("Token refresh failed" in record.message for record in caplog.records) + + +def test_session_auth_initializes_signer_from_config(): + config = { + "key_file": "dummy.key", + "security_token_file": "dummy.token", + "tenancy": "dummy_tenancy", + "user": "dummy_user", + "fingerprint": "dummy_fingerprint", + } + with ( + patch("oci.config.from_file", return_value=config), + patch("oci.signer.load_private_key_from_file", return_value="dummy_private_key"), + patch("oci.auth.signers.SecurityTokenSigner") as mock_signer, + patch("builtins.open", create=True) as mock_open, + ): + mock_open.return_value.__enter__.return_value.read.return_value = "dummy_token" + auth = OciSessionAuth( + profile_name="DEFAULT", + generic_headers=["date"], + body_headers=["content-length"], + ) + + mock_signer.assert_called_once_with( + "dummy_token", + "dummy_private_key", + generic_headers=["date"], + body_headers=["content-length"], + ) + assert auth.signer == mock_signer.return_value + + +def test_user_principal_auth_uses_signer_from_config(): + config = { + "key_file": "dummy.key", + "tenancy": "dummy_tenancy", + "user": "dummy_user", + "fingerprint": "dummy_fingerprint", + } + with ( + patch("oci.config.from_file", return_value=config), + patch("oci.config.validate_config", return_value=True), + patch("oci.signer.Signer") as mock_signer, + ): + auth = OciUserPrincipalAuth(profile_name="DEFAULT") + + mock_signer.assert_called_once() + assert auth.signer == mock_signer.return_value + + +def 
test_resource_principal_refreshes_signer(): + with patch( + "oci.auth.signers.get_resource_principals_signer", return_value="signer-1" + ) as mock_signer: + auth = OciResourcePrincipalAuth() + assert auth.signer == "signer-1" + mock_signer.assert_called_once() + + mock_signer.reset_mock() + mock_signer.return_value = "signer-2" + auth._refresh_signer() + mock_signer.assert_called_once() + assert auth.signer == "signer-2" + + +def test_instance_principal_refreshes_signer(): + with patch( + "oci.auth.signers.InstancePrincipalsSecurityTokenSigner", return_value="signer-1" + ) as mock_signer: + auth = OciInstancePrincipalAuth() + assert auth.signer == "signer-1" + mock_signer.assert_called_once() + + mock_signer.reset_mock() + mock_signer.return_value = "signer-2" + auth._refresh_signer() + mock_signer.assert_called_once() + assert auth.signer == "signer-2"