From 74bd5fbd57ed9738d924a602ee4c3bf871838205 Mon Sep 17 00:00:00 2001 From: Daniel Blando Date: Tue, 12 May 2026 15:43:19 -0700 Subject: [PATCH] security: limit decompressed gzip output in ParseProtoReader and OTLP ingestion path Wrap gzip.Reader with io.LimitReader(maxSize+1) before reading decompressed bytes in both pkg/util/http.go and pkg/util/push/otlp.go. Signed-off-by: Daniel Blando --- CHANGELOG.md | 1 + pkg/util/http.go | 8 ++++---- pkg/util/http_test.go | 24 ++++++++++++++++++++++++ pkg/util/push/otlp.go | 1 + 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d56a1e916ed..bf05db5ba0d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ * [BUGFIX] Memberlist: Drop incoming TCP transport packets when digest verification fails, preventing corrupted payloads from being forwarded. #7474 * [BUGFIX] Compactor: Fix stale `cortex_bucket_index_last_successful_update_timestamp_seconds` metric not being cleaned up when tenant ownership changes due to ring rebalancing. This caused false alarms on bucket index update rate when a tenant moved between compactors. #7485 * [BUGFIX] Security: Fix stored XSS vulnerability in Alertmanager and Store Gateway status pages by replacing `text/template` with `html/template`. #7512 +* [BUGFIX] Security: Limit decompressed gzip output in `ParseProtoReader` and the OTLP ingestion path. The decompressed body is now capped at the caller-provided maximum message size (`-distributor.otlp-max-recv-msg-size` for OTLP). 
#7515 ## 1.21.0 2026-04-24 diff --git a/pkg/util/http.go b/pkg/util/http.go index a1a221b2365..0e7500716ea 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -220,11 +220,11 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi } body, err = decompressFromBuffer(&buf, maxSize, RawSnappy, sp) case Gzip: - reader, err = gzip.NewReader(reader) - if err != nil { - return nil, err + gzReader, gzErr := gzip.NewReader(reader) + if gzErr != nil { + return nil, gzErr } - _, err = buf.ReadFrom(reader) + _, err = buf.ReadFrom(io.LimitReader(gzReader, int64(maxSize)+1)) body = buf.Bytes() } return body, err diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go index a5226ba4757..199fa8ad06e 100644 --- a/pkg/util/http_test.go +++ b/pkg/util/http_test.go @@ -2,6 +2,7 @@ package util_test import ( "bytes" + "compress/gzip" "context" "html/template" "io" @@ -220,3 +221,26 @@ func TestIsRequestBodyTooLargeRegression(t *testing.T) { _, err := io.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1)) assert.True(t, util.IsRequestBodyTooLarge(err)) } + +func TestParseProtoReader_GzipDecompressionBomb(t *testing.T) { + // Create a gzip payload where decompressed size far exceeds maxSize. + const maxSize = 4096 // 4 KB limit on decompressed output + uncompressed := make([]byte, 1<<20) // 1 MB of zeros + + var compressed bytes.Buffer + gzw := gzip.NewWriter(&compressed) + _, err := gzw.Write(uncompressed) + require.NoError(t, err) + require.NoError(t, gzw.Close()) + + // The compressed payload is small enough to pass the compressed-size limit, + // but decompresses to far more than maxSize. 
+ require.Less(t, compressed.Len(), maxSize) + + var fromWire cortexpb.PreallocWriteRequest + err = util.ParseProtoReader(context.Background(), io.NopCloser(&compressed), 0, maxSize, &fromWire, util.Gzip) + // The LimitReader caps decompressed output at maxSize+1 bytes, so + // ParseProtoReader returns an error for the truncated payload instead of + // allocating the full 1 MB of decompressed data. + assert.NotNil(t, err) +} diff --git a/pkg/util/push/otlp.go b/pkg/util/push/otlp.go index 0a77bbee523..19f1ef58191 100644 --- a/pkg/util/push/otlp.go +++ b/pkg/util/push/otlp.go @@ -160,6 +160,7 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) ( if err != nil { return req, err } + reader = io.LimitReader(reader, int64(maxSize)+1) } var buf bytes.Buffer