From 89251eaf4eb6418a436c62dc0184230bcedc80b2 Mon Sep 17 00:00:00 2001 From: giulio-leone Date: Mon, 2 Mar 2026 15:36:07 +0100 Subject: [PATCH 1/3] fix(security): redact sensitive headers from debug logs At debug log level, response headers were logged unfiltered, potentially exposing API keys and authorization tokens. Now filters sensitive headers (authorization, api-key) from debug log output using the existing SENSITIVE_HEADERS set, consistent with SensitiveHeadersFilter. Fixes #1196 --- src/openai/_base_client.py | 15 ++++++++++++--- src/openai/_utils/__init__.py | 2 +- tests/test_utils/test_logging.py | 20 +++++++++++++++++++- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index cf4571bf45..54a8b2716a 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -61,7 +61,16 @@ ModelBuilderProtocol, not_given, ) -from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping +from ._utils import ( + SENSITIVE_HEADERS, + SensitiveHeadersFilter, + is_dict, + is_list, + asyncify, + is_given, + lru_cache, + is_mapping, +) from ._compat import PYDANTIC_V1, model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( @@ -1042,7 +1051,7 @@ def request( request.url, response.status_code, response.reason_phrase, - response.headers, + {k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" for k, v in response.headers.items()}, ) log.debug("request_id: %s", response.headers.get("x-request-id")) @@ -1641,7 +1650,7 @@ async def request( request.url, response.status_code, response.reason_phrase, - response.headers, + {k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" for k, v in response.headers.items()}, ) log.debug("request_id: %s", response.headers.get("x-request-id")) diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 963c83b6d4..5c6f2a30dc 
100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -1,4 +1,4 @@ -from ._logs import SensitiveHeadersFilter as SensitiveHeadersFilter +from ._logs import SENSITIVE_HEADERS as SENSITIVE_HEADERS, SensitiveHeadersFilter as SensitiveHeadersFilter from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy from ._utils import ( diff --git a/tests/test_utils/test_logging.py b/tests/test_utils/test_logging.py index cc018012e2..9d2def48fd 100644 --- a/tests/test_utils/test_logging.py +++ b/tests/test_utils/test_logging.py @@ -3,7 +3,7 @@ import pytest -from openai._utils import SensitiveHeadersFilter +from openai._utils import SENSITIVE_HEADERS, SensitiveHeadersFilter @pytest.fixture @@ -98,3 +98,21 @@ def test_standard_debug_msg(logger_with_filter: logging.Logger, caplog: pytest.L with caplog.at_level(logging.DEBUG): logger_with_filter.debug("Sending HTTP Request: %s %s", "POST", "chat/completions") assert caplog.messages[0] == "Sending HTTP Request: POST chat/completions" + + +def test_response_headers_redaction() -> None: + """Verify that filtering response headers with SENSITIVE_HEADERS redacts sensitive values.""" + raw_headers = { + "content-type": "application/json", + "authorization": "Bearer sk-secret-key", + "api-key": "my-secret-api-key", + "x-request-id": "req_abc123", + } + filtered = { + k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" + for k, v in raw_headers.items() + } + assert filtered["content-type"] == "application/json" + assert filtered["authorization"] == "<redacted>" + assert filtered["api-key"] == "<redacted>" + assert filtered["x-request-id"] == "req_abc123" From 6e91375757160753d7096702d1036f843e0b1f86 Mon Sep 17 00:00:00 2001 From: giulio-leone Date: Mon, 2 Mar 2026 16:19:12 +0100 Subject: [PATCH 2/3] fix(logging): avoid eager response header redaction work Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/openai/_base_client.py | 40 +++++++++++++++++++++++------------- 
src/openai/_utils/__init__.py | 6 ++++- src/openai/_utils/_logs.py | 15 ++++++++---- tests/test_utils/test_logging.py | 9 +++---- 4 files changed, 40 insertions(+), 30 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 54a8b2716a..eedce363a6 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -62,7 +62,6 @@ not_given, ) from ._utils import ( - SENSITIVE_HEADERS, SensitiveHeadersFilter, is_dict, is_list, @@ -70,6 +69,7 @@ is_given, lru_cache, is_mapping, + redact_sensitive_headers, ) from ._compat import PYDANTIC_V1, model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type @@ -1045,15 +1045,16 @@ def request( log.debug("Raising connection error") raise APIConnectionError(request=request) from err - log.debug( - 'HTTP Response: %s %s "%i %s" %s', - request.method, - request.url, - response.status_code, - response.reason_phrase, - {k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" for k, v in response.headers.items()}, - ) - log.debug("request_id: %s", response.headers.get("x-request-id")) + if log.isEnabledFor(logging.DEBUG): + log.debug( + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + response.status_code, + response.reason_phrase, + redact_sensitive_headers(response.headers), + ) + log.debug("request_id: %s", response.headers.get("x-request-id")) try: response.raise_for_status() @@ -1644,15 +1645,16 @@ async def request( log.debug("Raising connection error") raise APIConnectionError(request=request) from err - log.debug( - 'HTTP Response: %s %s "%i %s" %s', - request.method, - request.url, - response.status_code, - response.reason_phrase, - {k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" for k, v in response.headers.items()}, - ) - log.debug("request_id: %s", response.headers.get("x-request-id")) + if log.isEnabledFor(logging.DEBUG): + log.debug( + 'HTTP Response: %s %s "%i %s" %s', + request.method, + request.url, + 
response.status_code, + response.reason_phrase, + redact_sensitive_headers(response.headers), + ) + log.debug("request_id: %s", response.headers.get("x-request-id")) try: response.raise_for_status() diff --git a/src/openai/_utils/__init__.py b/src/openai/_utils/__init__.py index 5c6f2a30dc..15601ec59d 100644 --- a/src/openai/_utils/__init__.py +++ b/src/openai/_utils/__init__.py @@ -1,4 +1,8 @@ -from ._logs import SENSITIVE_HEADERS as SENSITIVE_HEADERS, SensitiveHeadersFilter as SensitiveHeadersFilter +from ._logs import ( + SENSITIVE_HEADERS as SENSITIVE_HEADERS, + SensitiveHeadersFilter as SensitiveHeadersFilter, + redact_sensitive_headers as redact_sensitive_headers, +) from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy from ._utils import ( diff --git a/src/openai/_utils/_logs.py b/src/openai/_utils/_logs.py index 376946933c..450131a57f 100644 --- a/src/openai/_utils/_logs.py +++ b/src/openai/_utils/_logs.py @@ -1,5 +1,8 @@ +from __future__ import annotations + import os import logging +from typing import Any, Mapping from typing_extensions import override from ._utils import is_dict @@ -11,6 +14,13 @@ SENSITIVE_HEADERS = {"api-key", "authorization"} +def redact_sensitive_headers(headers: Mapping[str, Any]) -> dict[str, Any]: + return { + k: (v if str(k).lower() not in SENSITIVE_HEADERS else "<redacted>") + for k, v in headers.items() + } + + def _basic_config() -> None: # e.g. 
[2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( @@ -35,8 +45,5 @@ class SensitiveHeadersFilter(logging.Filter): @override def filter(self, record: logging.LogRecord) -> bool: if is_dict(record.args) and "headers" in record.args and is_dict(record.args["headers"]): - headers = record.args["headers"] = {**record.args["headers"]} - for header in headers: - if str(header).lower() in SENSITIVE_HEADERS: - headers[header] = "<redacted>" + record.args["headers"] = redact_sensitive_headers({**record.args["headers"]}) return True diff --git a/tests/test_utils/test_logging.py b/tests/test_utils/test_logging.py index 9d2def48fd..a93c970ce8 100644 --- a/tests/test_utils/test_logging.py +++ b/tests/test_utils/test_logging.py @@ -3,7 +3,7 @@ import pytest -from openai._utils import SENSITIVE_HEADERS, SensitiveHeadersFilter +from openai._utils import SensitiveHeadersFilter, redact_sensitive_headers @pytest.fixture @@ -101,17 +101,14 @@ def test_standard_debug_msg(logger_with_filter: logging.Logger, caplog: pytest.L def test_response_headers_redaction() -> None: - """Verify that filtering response headers with SENSITIVE_HEADERS redacts sensitive values.""" + """Verify sensitive response headers are redacted by the shared helper.""" raw_headers = { "content-type": "application/json", "authorization": "Bearer sk-secret-key", "api-key": "my-secret-api-key", "x-request-id": "req_abc123", } - filtered = { - k: v if k.lower() not in SENSITIVE_HEADERS else "<redacted>" - for k, v in raw_headers.items() - } + filtered = redact_sensitive_headers(raw_headers) assert filtered["content-type"] == "application/json" assert filtered["authorization"] == "<redacted>" assert filtered["api-key"] == "<redacted>" From 5738c34b36e54f6282234a6cfd8636e9df859f44 Mon Sep 17 00:00:00 2001 From: giulio-leone Date: Mon, 2 Mar 2026 17:21:38 +0100 Subject: [PATCH 3/3] fix(review): add integration test for response header redaction in _base_client Adds a test using 
respx to verify that sensitive response headers are actually redacted in log output when making real client calls, not just testing the filter utility in isolation. Refs: #2918 --- tests/test_utils/test_logging.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/test_utils/test_logging.py b/tests/test_utils/test_logging.py index a93c970ce8..2057ff00c6 100644 --- a/tests/test_utils/test_logging.py +++ b/tests/test_utils/test_logging.py @@ -1,8 +1,11 @@ import logging from typing import Any, Dict, cast +import httpx import pytest +from respx import MockRouter +from openai import OpenAI from openai._utils import SensitiveHeadersFilter, redact_sensitive_headers @@ -113,3 +116,29 @@ def test_response_headers_redaction() -> None: assert filtered["authorization"] == "<redacted>" assert filtered["api-key"] == "<redacted>" assert filtered["x-request-id"] == "req_abc123" + + +@pytest.mark.respx(base_url="https://api.openai.com/v1") +def test_response_header_redaction_in_client( + respx_mock: MockRouter, + caplog: pytest.LogCaptureFixture, +) -> None: + """Integration test: verify _base_client.py redacts sensitive response headers in actual log output.""" + respx_mock.post("/chat/completions").mock( + return_value=httpx.Response( + 200, + json={"id": "chatcmpl-test", "object": "chat.completion", "choices": [], "created": 0, "model": "gpt-4"}, + headers={"authorization": "Bearer secret", "x-request-id": "req_123"}, + ) + ) + + client = OpenAI(api_key="test-key", base_url="https://api.openai.com/v1") + + with caplog.at_level(logging.DEBUG, logger="openai"): + client.chat.completions.create(messages=[], model="gpt-4") + + response_logs = [r for r in caplog.records if r.getMessage().startswith("HTTP Response:")] + assert len(response_logs) >= 1, "Expected at least one 'HTTP Response:' log line" + msg = response_logs[0].getMessage() + assert "secret" not in msg, "Sensitive header value should be redacted in log output" + assert "<redacted>" in msg, "Redacted placeholder 
should appear in log output"