 RC_PATH = f"datadog/2/{RC_PRODUCT}"
 
 
-def make_ufc_fixture(flag_key, variant_key="on", variation_type="STRING", enabled=True):
+def make_ufc_fixture(flag_key: str, variant_key: str = "on", variation_type: str = "STRING", *, enabled: bool = True):
     """Create a UFC fixture with the given flag configuration."""
     values: dict[str, dict[str, str | bool]] = {
         "STRING": {"on": "on-value", "off": "off-value"},
@@ -47,7 +47,7 @@ def make_ufc_fixture(flag_key, variant_key="on", variation_type="STRING", enable
     }
 
 
-def find_eval_metrics(flag_key=None):
+def find_eval_metrics(flag_key: str | None = None):
     """Find feature_flag.evaluations metrics in agent data.
 
     Returns a list of metric points matching the metric name, optionally filtered by flag key tag.
@@ -67,7 +67,7 @@ def find_eval_metrics(flag_key=None):
     return results
 
 
-def get_tag_value(tags, key):
+def get_tag_value(tags: list[str], key: str):
     """Extract a tag value from a list of 'key:value' strings."""
     prefix = f"{key}:"
     for tag in tags:
@@ -99,8 +99,6 @@ def setup_ffe_eval_metric_basic(self):
             },
         )
 
-
-
     def test_ffe_eval_metric_basic(self):
         """Test that flag evaluation produces a metric with correct tags."""
         assert self.r.status_code == 200, f"Flag evaluation failed: {self.r.text}"
@@ -121,8 +119,8 @@ def test_ffe_eval_metric_basic(self):
         assert get_tag_value(tags, "feature_flag.result.variant") == "on", (
             f"Expected tag feature_flag.result.variant:on, got tags: {tags}"
         )
-        assert get_tag_value(tags, "feature_flag.result.reason") == "targeting_match", (
-            f"Expected tag feature_flag.result.reason:targeting_match, got tags: {tags}"
+        assert get_tag_value(tags, "feature_flag.result.reason") == "static", (
+            f"Expected tag feature_flag.result.reason:static, got tags: {tags}"
         )
         assert get_tag_value(tags, "feature_flag.result.allocation_key") == "default-allocation", (
             f"Expected tag feature_flag.result.allocation_key:default-allocation, got tags: {tags}"
@@ -156,17 +154,14 @@ def setup_ffe_eval_metric_count(self):
             )
             self.responses.append(r)
 
-
-
     def test_ffe_eval_metric_count(self):
         """Test that N evaluations produce metric count = N."""
         for i, r in enumerate(self.responses):
             assert r.status_code == 200, f"Request {i + 1} failed: {r.text}"
 
         metrics = find_eval_metrics(self.flag_key)
         assert len(metrics) > 0, (
-            f"Expected at least one feature_flag.evaluations metric for flag '{self.flag_key}', "
-            f"but found none."
+            f"Expected at least one feature_flag.evaluations metric for flag '{self.flag_key}', but found none."
         )
 
         # Sum all data points for this flag (agent may split across multiple series entries)
@@ -180,9 +175,7 @@ def test_ffe_eval_metric_count(self):
             elif isinstance(p, list) and len(p) >= 2:
                 total_count += p[1]
 
-        assert total_count >= self.eval_count, (
-            f"Expected metric count >= {self.eval_count}, got {total_count}"
-        )
+        assert total_count >= self.eval_count, f"Expected metric count >= {self.eval_count}, got {total_count}"
 
 
 @scenarios.feature_flagging_and_experimentation
@@ -262,8 +255,6 @@ def setup_ffe_eval_metric_different_flags(self):
             },
         )
 
-
-
     def test_ffe_eval_metric_different_flags(self):
         """Test that each flag key gets its own metric series."""
         assert self.r_a.status_code == 200, f"Flag A evaluation failed: {self.r_a.text}"
@@ -272,12 +263,8 @@ def test_ffe_eval_metric_different_flags(self):
         metrics_a = find_eval_metrics(self.flag_a)
         metrics_b = find_eval_metrics(self.flag_b)
 
-        assert len(metrics_a) > 0, (
-            f"Expected metric for flag '{self.flag_a}', found none. All: {find_eval_metrics()}"
-        )
-        assert len(metrics_b) > 0, (
-            f"Expected metric for flag '{self.flag_b}', found none. All: {find_eval_metrics()}"
-        )
+        assert len(metrics_a) > 0, f"Expected metric for flag '{self.flag_a}', found none. All: {find_eval_metrics()}"
+        assert len(metrics_b) > 0, f"Expected metric for flag '{self.flag_b}', found none. All: {find_eval_metrics()}"
 
 
 @scenarios.feature_flagging_and_experimentation
283270@scenarios .feature_flagging_and_experimentation
@@ -290,9 +277,7 @@ def setup_ffe_eval_metric_error(self):
 
         # Set up config with a different flag than what we'll request
         config_id = "ffe-eval-metric-error"
-        rc.tracer_rc_state.set_config(
-            f"{RC_PATH}/{config_id}/config", make_ufc_fixture("some-other-flag")
-        ).apply()
+        rc.tracer_rc_state.set_config(f"{RC_PATH}/{config_id}/config", make_ufc_fixture("some-other-flag")).apply()
 
         self.flag_key = "non-existent-eval-metric-flag"
         self.r = weblog.post(
@@ -306,8 +291,6 @@ def setup_ffe_eval_metric_error(self):
             },
         )
 
-
-
     def test_ffe_eval_metric_error(self):
         """Test that error evaluations produce metric with error.type tag."""
         assert self.r.status_code == 200, f"Flag evaluation request failed: {self.r.text}"
@@ -362,16 +345,12 @@ def setup_ffe_eval_metric_type_mismatch(self):
             },
         )
 
-
-
     def test_ffe_eval_metric_type_mismatch(self):
         """Test that type conversion errors produce metric with error.type:type_mismatch."""
         assert self.r.status_code == 200, f"Flag evaluation request failed: {self.r.text}"
 
         metrics = find_eval_metrics(self.flag_key)
-        assert len(metrics) > 0, (
-            f"Expected metric for flag '{self.flag_key}', found none. All: {find_eval_metrics()}"
-        )
+        assert len(metrics) > 0, f"Expected metric for flag '{self.flag_key}', found none. All: {find_eval_metrics()}"
 
         point = metrics[0]
         tags = point.get("tags", [])
0 commit comments