Skip to content

Commit 84cb718

Browse files
committed
fix(tests): Use synchronous Snuba insertion in preprod EAP integration tests
The tests were producing TraceItems via the async Kafka producer, but SingletonProducer only flushes on process exit, so data never reached Snuba during test runs. Fix by intercepting the Kafka produce call to capture the serialized TraceItem and inserting it via the synchronous HTTP endpoint (store_occurrences), which is the standard pattern used by all other Snuba integration tests.
1 parent 813ac44 commit 84cb718

1 file changed

Lines changed: 45 additions & 55 deletions

File tree

tests/snuba/preprod/eap/test_preprod_eap_integration.py

Lines changed: 45 additions & 55 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,3 @@
-import time
 from datetime import datetime, timedelta
 from datetime import timezone as dt_timezone
 

@@ -15,6 +14,8 @@
 )
 from sentry.testutils.cases import SnubaTestCase, TestCase
 
+PREPROD_PRODUCER_MOCK_PATH = "sentry.preprod.eap.write._eap_producer.produce"
+
 
 class PreprodEAPIntegrationTest(TestCase, SnubaTestCase):
     def test_write_and_read_size_metric_round_trip(self):
@@ -58,41 +59,35 @@ def test_write_and_read_size_metric_round_trip(self):
             analysis_file_id=123,
         )
 
-        produce_preprod_size_metric_to_eap(
+        self.produce_and_store_eap_items(
+            PREPROD_PRODUCER_MOCK_PATH,
+            produce_preprod_size_metric_to_eap,
             size_metric=size_metric,
             organization=self.organization,
             organization_id=self.organization.id,
             project_id=self.project.id,
         )
 
-        max_attempts = 20
-        found = False
-
-        for attempt in range(max_attempts):
-            time.sleep(0.5)
-
-            app_filter = TraceItemFilter(
-                comparison_filter=ComparisonFilter(
-                    key=AttributeKey(name="app_id", type=AttributeKey.Type.TYPE_STRING),
-                    op=ComparisonFilter.OP_EQUALS,
-                    value=AttributeValue(val_str="com.example.integrationtest"),
-                )
-            )
-
-            response = query_preprod_size_metrics(
-                organization_id=self.organization.id,
-                project_ids=[self.project.id],
-                start=datetime.now(dt_timezone.utc) - timedelta(hours=1),
-                end=datetime.now(dt_timezone.utc) + timedelta(hours=1),
-                referrer="test.preprod.integration",
-                filter=app_filter,
+        app_filter = TraceItemFilter(
+            comparison_filter=ComparisonFilter(
+                key=AttributeKey(name="app_id", type=AttributeKey.Type.TYPE_STRING),
+                op=ComparisonFilter.OP_EQUALS,
+                value=AttributeValue(val_str="com.example.integrationtest"),
             )
+        )
 
-            if response.column_values:
-                found = True
-                break
+        response = query_preprod_size_metrics(
+            organization_id=self.organization.id,
+            project_ids=[self.project.id],
+            start=datetime.now(dt_timezone.utc) - timedelta(hours=1),
+            end=datetime.now(dt_timezone.utc) + timedelta(hours=1),
+            referrer="test.preprod.integration",
+            filter=app_filter,
+        )
 
-        assert found, f"Data not found in Snuba after {max_attempts} attempts"
+        assert response.column_values and response.column_values[0].results, (
+            "Data not found in Snuba"
+        )
 
         columns = {cv.attribute_name: idx for idx, cv in enumerate(response.column_values)}
 

@@ -154,49 +149,44 @@ def test_write_multiple_size_metrics_same_artifact(self):
             max_install_size=1000,
         )
 
-        produce_preprod_size_metric_to_eap(
+        self.produce_and_store_eap_items(
+            PREPROD_PRODUCER_MOCK_PATH,
+            produce_preprod_size_metric_to_eap,
             size_metric=size_metric_main,
             organization=self.organization,
             organization_id=self.organization.id,
             project_id=self.project.id,
         )
 
-        produce_preprod_size_metric_to_eap(
+        self.produce_and_store_eap_items(
+            PREPROD_PRODUCER_MOCK_PATH,
+            produce_preprod_size_metric_to_eap,
             size_metric=size_metric_watch,
             organization=self.organization,
             organization_id=self.organization.id,
             project_id=self.project.id,
         )
 
-        max_attempts = 20
-        found_count = 0
-
-        for attempt in range(max_attempts):
-            time.sleep(0.5)
-
-            app_filter = TraceItemFilter(
-                comparison_filter=ComparisonFilter(
-                    key=AttributeKey(name="app_id", type=AttributeKey.Type.TYPE_STRING),
-                    op=ComparisonFilter.OP_EQUALS,
-                    value=AttributeValue(val_str="com.example.multitest"),
-                )
-            )
-
-            response = query_preprod_size_metrics(
-                organization_id=self.organization.id,
-                project_ids=[self.project.id],
-                start=datetime.now(dt_timezone.utc) - timedelta(hours=1),
-                end=datetime.now(dt_timezone.utc) + timedelta(hours=1),
-                referrer="test.preprod.integration",
-                filter=app_filter,
+        app_filter = TraceItemFilter(
+            comparison_filter=ComparisonFilter(
+                key=AttributeKey(name="app_id", type=AttributeKey.Type.TYPE_STRING),
+                op=ComparisonFilter.OP_EQUALS,
+                value=AttributeValue(val_str="com.example.multitest"),
             )
+        )
 
-            if response.column_values:
-                found_count = len(response.column_values[0].results)
-                if found_count >= 2:
-                    break
+        response = query_preprod_size_metrics(
+            organization_id=self.organization.id,
+            project_ids=[self.project.id],
+            start=datetime.now(dt_timezone.utc) - timedelta(hours=1),
+            end=datetime.now(dt_timezone.utc) + timedelta(hours=1),
+            referrer="test.preprod.integration",
+            filter=app_filter,
+        )
 
-        assert found_count == 2, f"Expected 2 records, found {found_count}"
+        assert response.column_values and len(response.column_values[0].results) == 2, (
+            f"Expected 2 records, found {len(response.column_values[0].results) if response.column_values else 0}"
+        )
 
         columns = {cv.attribute_name: idx for idx, cv in enumerate(response.column_values)}
         num_rows = len(response.column_values[0].results)

0 commit comments

Comments
 (0)