|
1 | | -"""Simple test for runtime factory and executor span capture.""" |
| 1 | +"""Tests for UiPathTraceManager""" |
| 2 | + |
| 3 | +from unittest.mock import MagicMock |
2 | 4 |
|
3 | 5 | import pytest |
4 | 6 | from opentelemetry import trace |
| 7 | +from opentelemetry.sdk.trace import SpanProcessor |
| 8 | + |
| 9 | +from uipath.core.tracing.trace_manager import ( |
| 10 | + UiPathTraceManager, |
| 11 | + _DelegatingSpanProcessor, |
| 12 | +) |
| 13 | + |
5 | 14 |
|
6 | | -from uipath.core.tracing.trace_manager import UiPathTraceManager |
@pytest.fixture(autouse=True)
def _reset_delegating_singleton():
    """Reset the `_DelegatingSpanProcessor` singleton around every test.

    The processor is a process-wide singleton; without this reset, processors
    registered by one test would leak into the next.  The teardown is wrapped
    in ``try/finally`` so the reset still runs if the generator is closed or
    thrown into before the post-yield code is reached.
    """
    _DelegatingSpanProcessor._instance = None
    try:
        yield
    finally:
        _DelegatingSpanProcessor._instance = None
7 | 21 |
|
8 | 22 |
|
9 | 23 | @pytest.mark.asyncio |
@@ -31,3 +45,160 @@ async def test_multiple_factories_same_executor(): |
31 | 45 |
|
32 | 46 | assert spans[1].name == "root-span" |
33 | 47 | assert spans[1].attributes == {"execution.id": "test"} |
| 48 | + |
| 49 | + |
class TestDelegatingSpanProcessor:
    """Behavioral tests for _DelegatingSpanProcessor."""

    def test_add_and_clear(self) -> None:
        # Registering two children then clearing must return them in order
        # and leave the delegator empty.
        delegator = _DelegatingSpanProcessor()
        first = MagicMock(spec=SpanProcessor)
        second = MagicMock(spec=SpanProcessor)

        delegator.add(first)
        delegator.add(second)
        assert len(delegator._processors) == 2

        assert delegator.clear() == [first, second]
        assert delegator._processors == []

    def test_delegates_on_start_and_on_end(self) -> None:
        # Both registered children receive every span lifecycle event.
        delegator = _DelegatingSpanProcessor()
        children = [MagicMock(spec=SpanProcessor) for _ in range(2)]
        for child in children:
            delegator.add(child)

        started_span = MagicMock()
        delegator.on_start(started_span, parent_context=None)
        for child in children:
            child.on_start.assert_called_once_with(started_span, None)

        finished_span = MagicMock()
        delegator.on_end(finished_span)
        for child in children:
            child.on_end.assert_called_once_with(finished_span)

    def test_clear_stops_delegation(self) -> None:
        # After clear(), previously registered children must see no events.
        delegator = _DelegatingSpanProcessor()
        child = MagicMock(spec=SpanProcessor)
        delegator.add(child)
        delegator.clear()

        span = MagicMock()
        delegator.on_start(span, parent_context=None)
        delegator.on_end(span)
        child.on_start.assert_not_called()
        child.on_end.assert_not_called()

    def test_get_instance_returns_singleton(self) -> None:
        # Repeated get_instance() calls hand back the same object, and the
        # provider registers it exactly once.
        provider = MagicMock()
        first_instance = _DelegatingSpanProcessor.get_instance(provider)
        second_instance = _DelegatingSpanProcessor.get_instance(provider)
        assert first_instance is second_instance
        provider.add_span_processor.assert_called_once_with(first_instance)

    def test_force_flush_delegates(self) -> None:
        # The timeout is forwarded to the child and success propagates back.
        delegator = _DelegatingSpanProcessor()
        child = MagicMock(spec=SpanProcessor)
        delegator.add(child)

        outcome = delegator.force_flush(timeout_millis=5000)
        assert outcome is True
        child.force_flush.assert_called_once_with(5000)

    def test_force_flush_returns_false_on_child_failure(self) -> None:
        # One failing child is enough to report an overall failure.
        delegator = _DelegatingSpanProcessor()
        healthy = MagicMock(spec=SpanProcessor)
        failing = MagicMock(spec=SpanProcessor)
        healthy.force_flush.return_value = True
        failing.force_flush.return_value = False
        delegator.add(healthy)
        delegator.add(failing)

        assert delegator.force_flush(timeout_millis=5000) is False

    def test_shutdown_delegates_to_children(self) -> None:
        # shutdown() reaches every child but does not drop the registrations.
        delegator = _DelegatingSpanProcessor()
        children = [MagicMock(spec=SpanProcessor) for _ in range(2)]
        for child in children:
            delegator.add(child)
        delegator.shutdown()
        for child in children:
            child.shutdown.assert_called_once()
        assert len(delegator._processors) == 2
| 132 | + |
| 133 | + |
class TestTraceManagerShutdown:
    """Tests for the multi-job accumulation fix."""

    def test_shutdown_clears_processors(self) -> None:
        # shutdown() must flush, shut down, and forget every registered processor.
        manager = UiPathTraceManager()
        processor = MagicMock(spec=SpanProcessor)
        manager.add_span_processor(processor)

        manager.shutdown()

        assert manager.tracer_span_processors == []
        assert manager._delegating._processors == []
        processor.force_flush.assert_called_once()
        processor.shutdown.assert_called_once()

    def test_successive_managers_do_not_accumulate(self) -> None:
        """Simulates multiple jobs on the same pod.

        Each job creates a new UiPathTraceManager and adds a processor.
        After shutdown, the next job's processors should not stack with
        the previous ones.
        """
        processor_counts: list[int] = []

        for _job in range(5):
            manager = UiPathTraceManager()
            manager.add_span_processor(MagicMock(spec=SpanProcessor))

            # Count processors visible to the delegator (minus the batch
            # processor added by __init__ for execution_span_exporter)
            processor_counts.append(len(manager._delegating._processors))
            manager.shutdown()

        # Every job should see the same number of processors (2: the
        # execution batch processor from __init__ + the one we added),
        # NOT a linearly growing count.
        assert len(set(processor_counts)) == 1, (
            f"Processor counts should be constant across jobs, got: {processor_counts}"
        )

    def test_shutdown_tolerates_processor_error(self) -> None:
        # A processor that blows up during flush must not abort shutdown.
        manager = UiPathTraceManager()
        faulty = MagicMock(spec=SpanProcessor)
        faulty.force_flush.side_effect = RuntimeError("flush failed")
        manager.add_span_processor(faulty)

        # Should not raise
        manager.shutdown()
        assert manager.tracer_span_processors == []
        assert manager._delegating._processors == []

    def test_spans_not_sent_to_old_processors_after_shutdown(self) -> None:
        """After shutdown, old processors must not receive new span events."""
        first_manager = UiPathTraceManager()
        stale_processor = MagicMock(spec=SpanProcessor)
        first_manager.add_span_processor(stale_processor)
        first_manager.shutdown()

        # Simulate next job
        second_manager = UiPathTraceManager()
        fresh_processor = MagicMock(spec=SpanProcessor)
        second_manager.add_span_processor(fresh_processor)

        span = MagicMock()
        second_manager._delegating.on_start(span, parent_context=None)

        fresh_processor.on_start.assert_called_once()
        stale_processor.on_start.assert_not_called()

        second_manager.shutdown()
0 commit comments