diff --git a/src/sentry_sync.c b/src/sentry_sync.c index 7766a6970..079274311 100644 --- a/src/sentry_sync.c +++ b/src/sentry_sync.c @@ -121,10 +121,20 @@ thread_setname(sentry_threadid_t thread_id, const char *thread_name) * `done` *from* the worker signaling that it will close down and can be joined. */ +/** + * Overflow-safe addition that clamps to UINT64_MAX instead of wrapping. + */ +static uint64_t +add_saturate(uint64_t a, uint64_t b) +{ + return b <= UINT64_MAX - a ? a + b : UINT64_MAX; +} + struct sentry_bgworker_task_s; typedef struct sentry_bgworker_task_s { struct sentry_bgworker_task_s *next_task; long refcount; + uint64_t execute_after; sentry_task_exec_func_t exec_func; void (*cleanup_func)(void *task_data); void *task_data; @@ -155,6 +165,7 @@ struct sentry_bgworker_s { sentry_mutex_t task_lock; sentry_bgworker_task_t *first_task; sentry_bgworker_task_t *last_task; + sentry_bgworker_task_t *current_task; void *state; void (*free_state)(void *state); long refcount; @@ -225,7 +236,9 @@ sentry__bgworker_get_state(sentry_bgworker_t *bgw) static bool sentry__bgworker_is_done(sentry_bgworker_t *bgw) { - return !bgw->first_task && !sentry__atomic_fetch(&bgw->running); + return (!bgw->first_task + || sentry__monotonic_time() < bgw->first_task->execute_after) + && !sentry__atomic_fetch(&bgw->running); } SENTRY_THREAD_FN @@ -260,7 +273,18 @@ worker_thread(void *data) continue; } + // wait for a delayed task, wake up to new submissions + { + uint64_t now = sentry__monotonic_time(); + if (now < task->execute_after) { + sentry__cond_wait_timeout(&bgw->submit_signal, &bgw->task_lock, + (uint32_t)MIN(task->execute_after - now, UINT32_MAX)); + continue; + } + } + sentry__task_incref(task); + bgw->current_task = task; sentry__mutex_unlock(&bgw->task_lock); SENTRY_DEBUG("executing task on worker thread"); @@ -274,6 +298,7 @@ worker_thread(void *data) // if not, we pop it and `decref` again, removing the _is inside // list_ refcount. 
sentry__mutex_lock(&bgw->task_lock); + bgw->current_task = NULL; if (bgw->first_task == task) { bgw->first_task = task->next_task; if (task == bgw->last_task) { @@ -350,11 +375,26 @@ sentry__bgworker_flush(sentry_bgworker_t *bgw, uint64_t timeout) sentry__cond_init(&flush_task->signal); sentry__mutex_init(&flush_task->lock); + // place the flush sentinel after the last task due within the timeout; + // tasks delayed beyond the timeout cannot complete in time anyway + uint64_t before = sentry__monotonic_time(); + uint64_t deadline = add_saturate(before, timeout); + uint64_t execute_after = before; + sentry__mutex_lock(&bgw->task_lock); + for (sentry_bgworker_task_t *t + = bgw->current_task ? bgw->current_task->next_task : bgw->first_task; + t && t->execute_after <= deadline; t = t->next_task) { + if (t->execute_after > execute_after) { + execute_after = t->execute_after; + } + } + sentry__mutex_unlock(&bgw->task_lock); + sentry__mutex_lock(&flush_task->lock); /* submit the task that triggers our condvar once it runs */ - sentry__bgworker_submit(bgw, sentry__flush_task, - (void (*)(void *))sentry__flush_task_decref, flush_task); + sentry__bgworker_submit_at(bgw, sentry__flush_task, + (void (*)(void *))sentry__flush_task_decref, flush_task, execute_after); uint64_t started = sentry__monotonic_time(); bool was_flushed = false; @@ -397,12 +437,6 @@ sentry__bgworker_shutdown(sentry_bgworker_t *bgw, uint64_t timeout) uint64_t started = sentry__monotonic_time(); sentry__mutex_lock(&bgw->task_lock); while (true) { - if (sentry__bgworker_is_done(bgw)) { - sentry__mutex_unlock(&bgw->task_lock); - sentry__thread_join(bgw->thread_id); - return 0; - } - uint64_t now = sentry__monotonic_time(); if (now > started && now - started > timeout) { sentry__atomic_store(&bgw->running, 0); @@ -413,6 +447,12 @@ sentry__bgworker_shutdown(sentry_bgworker_t *bgw, uint64_t timeout) return 1; } + if (!sentry__atomic_fetch(&bgw->running)) { + sentry__mutex_unlock(&bgw->task_lock); + 
sentry__thread_join(bgw->thread_id); + return 0; + } + // this will implicitly release the lock, and re-acquire on wake sentry__cond_wait_timeout(&bgw->done_signal, &bgw->task_lock, 250); } @@ -422,6 +462,29 @@ int sentry__bgworker_submit(sentry_bgworker_t *bgw, sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), void *task_data) +{ + SENTRY_DEBUG("submitting task to background worker thread"); + return sentry__bgworker_submit_at( + bgw, exec_func, cleanup_func, task_data, sentry__monotonic_time()); +} + +int +sentry__bgworker_submit_delayed(sentry_bgworker_t *bgw, + sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), + void *task_data, uint64_t delay_ms) +{ + SENTRY_DEBUGF("submitting %" PRIu64 + " ms delayed task to background worker thread", + delay_ms); + uint64_t execute_after = add_saturate(sentry__monotonic_time(), delay_ms); + return sentry__bgworker_submit_at( + bgw, exec_func, cleanup_func, task_data, execute_after); +} + +int +sentry__bgworker_submit_at(sentry_bgworker_t *bgw, + sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), + void *task_data, uint64_t execute_after) { sentry_bgworker_task_t *task = SENTRY_MAKE(sentry_bgworker_task_t); if (!task) { @@ -432,19 +495,42 @@ sentry__bgworker_submit(sentry_bgworker_t *bgw, } task->next_task = NULL; task->refcount = 1; + task->execute_after = execute_after; task->exec_func = exec_func; task->cleanup_func = cleanup_func; task->task_data = task_data; - SENTRY_DEBUG("submitting task to background worker thread"); sentry__mutex_lock(&bgw->task_lock); + if (!bgw->first_task) { + // empty queue bgw->first_task = task; - } - if (bgw->last_task) { + bgw->last_task = task; + } else if (bgw->last_task->execute_after <= task->execute_after) { + // append last (common fast path for FIFO immediates) bgw->last_task->next_task = task; + bgw->last_task = task; + } else { + // insert sorted by execute_after; skip past current_task which + // may be executing 
without the lock held + sentry_bgworker_task_t *prev = bgw->current_task; + sentry_bgworker_task_t *cur = prev ? prev->next_task : bgw->first_task; + while (cur && cur->execute_after <= task->execute_after) { + prev = cur; + cur = cur->next_task; + } + + task->next_task = cur; + if (prev) { + prev->next_task = task; + } else { + bgw->first_task = task; + } + if (!task->next_task) { + bgw->last_task = task; + } } - bgw->last_task = task; + sentry__cond_wake(&bgw->submit_signal); sentry__mutex_unlock(&bgw->task_lock); @@ -475,6 +561,12 @@ sentry__bgworker_foreach_matching(sentry_bgworker_t *bgw, } else { bgw->first_task = next_task; } + if (bgw->current_task == task) { + bgw->current_task = NULL; + } else if (bgw->current_task + && bgw->current_task->next_task == task) { + bgw->current_task->next_task = next_task; + } sentry__task_decref(task); dropped++; } else { diff --git a/src/sentry_sync.h b/src/sentry_sync.h index 5516443b9..faa314e0a 100644 --- a/src/sentry_sync.h +++ b/src/sentry_sync.h @@ -471,12 +471,23 @@ const char *sentry__bgworker_get_thread_name(sentry_bgworker_t *bgw); /** * This will submit a new task to the background thread. * + * The `_delayed` variant delays execution by the specified delay in + * milliseconds, and the `_at` variant executes after the specified monotonic + * timestamp. The latter is mostly useful for testing to ensure deterministic + * ordering of tasks regardless of OS preemption between submissions. + * * Takes ownership of `data`, freeing it using the provided `cleanup_func`. * Returns 0 on success. 
*/ int sentry__bgworker_submit(sentry_bgworker_t *bgw, sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), void *task_data); +int sentry__bgworker_submit_delayed(sentry_bgworker_t *bgw, + sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), + void *task_data, uint64_t delay_ms); +int sentry__bgworker_submit_at(sentry_bgworker_t *bgw, + sentry_task_exec_func_t exec_func, void (*cleanup_func)(void *task_data), + void *task_data, uint64_t execute_after); /** * This function will iterate through all the current tasks of the worker diff --git a/tests/unit/test_sync.c b/tests/unit/test_sync.c index 795595554..66c49cdd6 100644 --- a/tests/unit/test_sync.c +++ b/tests/unit/test_sync.c @@ -1,13 +1,16 @@ #include "sentry_core.h" #include "sentry_sync.h" #include "sentry_testsupport.h" +#include "sentry_utils.h" #ifdef SENTRY_PLATFORM_WINDOWS # include <windows.h> # define sleep_s(SECONDS) Sleep((SECONDS) * 1000) +# define sleep_ms(MS) Sleep(MS) #else # include <unistd.h> # define sleep_s(SECONDS) sleep(SECONDS) +# define sleep_ms(MS) usleep((MS) * 1000) #endif struct task_state {
struct order_state *state = (struct order_state *)_state; + state->order[state->count++] = (int)(size_t)data; +} + +SENTRY_TEST(bgworker_task_delay) +{ + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + uint64_t before = sentry__monotonic_time(); + sentry__bgworker_submit_delayed( + bgw, record_order_task, NULL, (void *)1, 50); + + sentry__bgworker_start(bgw); + TEST_CHECK_INT_EQUAL(sentry__bgworker_flush(bgw, 500), 0); + uint64_t after = sentry__monotonic_time(); + + TEST_CHECK_INT_EQUAL(os.count, 1); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK(after - before >= 50); + + sentry__bgworker_shutdown(bgw, 500); + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_flush) +{ + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + uint64_t base = sentry__monotonic_time(); + + // immediate + eligible delayed + far-future delayed + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)1, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)2, base + 50); + sentry__bgworker_submit_delayed( + bgw, record_order_task, NULL, (void *)3, UINT64_MAX); + + sentry__bgworker_start(bgw); + + // flush covers the immediate and the 50ms task but skips the far-future one + TEST_CHECK_INT_EQUAL(sentry__bgworker_flush(bgw, 2000), 0); + TEST_CHECK_INT_EQUAL(os.count, 2); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK_INT_EQUAL(os.order[1], 2); + + sentry__bgworker_shutdown(bgw, 500); + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_tasks) +{ + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // submit_at with a fixed base so ordering is deterministic regardless + // of OS preemption between submissions (submit_delayed reads the clock + // per call, so a pause between calls could shift 
execute_after values + // and change the expected sort order) + uint64_t base = sentry__monotonic_time(); + + // all tasks sorted by execute_after: immediate (0) first, then delayed + // by deadline + // + // queue after each submit: + // i(1) + // i(1) d100(3) + // i(1) i(6) d100(3) + // i(1) i(6) d50(2) d100(3) + // i(1) i(6) i(7) d50(2) d100(3) + // i(1) i(6) i(7) d50(2) d100(3) d200(5) + // i(1) i(6) i(7) d50(2) d100(3) d150(4) d200(5) + // i(1) i(6) i(7) i(8) d50(2) d100(3) d150(4) d200(5) + // i(1) i(6) i(7) i(8) d50(2) d75(9) d100(3) d150(4) d200(5) + // i(1) i(6) i(7) i(8) i(10) d50(2) d75(9) d100(3) d150(4) d200(5) + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)1, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)3, base + 100); + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)6, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)2, base + 50); + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)7, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)5, base + 200); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)4, base + 150); + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)8, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)9, base + 75); + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)10, base); + + sentry__bgworker_start(bgw); + TEST_CHECK_INT_EQUAL(sentry__bgworker_flush(bgw, 5000), 0); + + // all tasks execute: immediate first, then delayed in deadline order + TEST_CHECK_INT_EQUAL(os.count, 10); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK_INT_EQUAL(os.order[1], 6); + TEST_CHECK_INT_EQUAL(os.order[2], 7); + TEST_CHECK_INT_EQUAL(os.order[3], 8); + TEST_CHECK_INT_EQUAL(os.order[4], 10); + TEST_CHECK_INT_EQUAL(os.order[5], 2); + TEST_CHECK_INT_EQUAL(os.order[6], 9); + TEST_CHECK_INT_EQUAL(os.order[7], 3); + 
TEST_CHECK_INT_EQUAL(os.order[8], 4); + TEST_CHECK_INT_EQUAL(os.order[9], 5); + + sentry__bgworker_shutdown(bgw, 500); + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_priority) +{ + SENTRY__MUTEX_INIT_DYN_ONCE(blocker_lock); + sentry__cond_init(&blocker_signal); + blocker_released = false; + + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // blocker holds the worker busy + sentry__bgworker_submit(bgw, blocker_task, NULL, NULL); + // delayed task queued behind the blocker + sentry__bgworker_submit_delayed( + bgw, record_order_task, NULL, (void *)1, 50); + + sentry__bgworker_start(bgw); + + // wait for the delayed task to become ready + sleep_ms(100); + + // submit an immediate task — should NOT bypass the ready delayed task + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)2); + + // release the blocker + sentry__mutex_lock(&blocker_lock); + blocker_released = true; + sentry__cond_wake(&blocker_signal); + sentry__mutex_unlock(&blocker_lock); + + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 5000), 0); + + TEST_CHECK_INT_EQUAL(os.count, 2); + TEST_CHECK_INT_EQUAL(os.order[0], 1); // delayed (was ready first) + TEST_CHECK_INT_EQUAL(os.order[1], 2); // immediate (submitted later) + + sentry__bgworker_decref(bgw); +} + +static void +blocking_record_task(void *data, void *_state) +{ + struct order_state *state = (struct order_state *)_state; + state->order[state->count++] = (int)(size_t)data; + + SENTRY__MUTEX_INIT_DYN_ONCE(blocker_lock); + sentry__mutex_lock(&blocker_lock); + while (!blocker_released) { + sentry__cond_wait_timeout(&blocker_signal, &blocker_lock, 100); + } + sentry__mutex_unlock(&blocker_lock); +} + +SENTRY_TEST(bgworker_delayed_current) +{ + SENTRY__MUTEX_INIT_DYN_ONCE(blocker_lock); + sentry__cond_init(&blocker_signal); + blocker_released = false; + + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = 
sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // head task that blocks and records execution + sentry__bgworker_submit(bgw, blocking_record_task, NULL, (void *)1); + + sentry__bgworker_start(bgw); + sleep_ms(100); + + // submit_at(0) would insert before head without the current_task guard + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)2, 0); + + sentry__mutex_lock(&blocker_lock); + blocker_released = true; + sentry__cond_wake(&blocker_signal); + sentry__mutex_unlock(&blocker_lock); + + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 5000), 0); + + // head task must not be re-executed + TEST_CHECK_INT_EQUAL(os.count, 2); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK_INT_EQUAL(os.order[1], 2); + + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_head) +{ + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + uint64_t base = sentry__monotonic_time(); + + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)1, base); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)2, base + 1); + // earlier than first_task -> triggers insert-before-head + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)3, base - 1); + + sentry__bgworker_start(bgw); + TEST_CHECK_INT_EQUAL(sentry__bgworker_flush(bgw, 5000), 0); + + TEST_CHECK_INT_EQUAL(os.count, 3); + TEST_CHECK_INT_EQUAL(os.order[0], 3); + TEST_CHECK_INT_EQUAL(os.order[1], 1); + TEST_CHECK_INT_EQUAL(os.order[2], 2); + + sentry__bgworker_shutdown(bgw, 500); + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_drop_current) +{ + SENTRY__MUTEX_INIT_DYN_ONCE(blocker_lock); + sentry__cond_init(&blocker_signal); + blocker_released = false; + + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // A blocks the worker; B is queued behind + sentry__bgworker_submit(bgw, 
blocking_record_task, NULL, (void *)1); + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)2); + + sentry__bgworker_start(bgw); + sleep_ms(100); + + // drop the currently executing task A + sentry__bgworker_foreach_matching( + bgw, blocking_record_task, drop_lessthan, (void *)2); + + // without the fix, this links to stale current_task + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)3, 0); + + sentry__mutex_lock(&blocker_lock); + blocker_released = true; + sentry__cond_wake(&blocker_signal); + sentry__mutex_unlock(&blocker_lock); + + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 5000), 0); + + TEST_CHECK_INT_EQUAL(os.count, 3); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK_INT_EQUAL(os.order[1], 3); + TEST_CHECK_INT_EQUAL(os.order[2], 2); + + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_drop_next) +{ + SENTRY__MUTEX_INIT_DYN_ONCE(blocker_lock); + sentry__cond_init(&blocker_signal); + blocker_released = false; + + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // A blocks the worker; B and C are queued behind + sentry__bgworker_submit(bgw, blocking_record_task, NULL, (void *)1); + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)2); + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)3); + + sentry__bgworker_start(bgw); + sleep_ms(100); + + // drop B (next after current_task A) + sentry__bgworker_foreach_matching( + bgw, record_order_task, drop_lessthan, (void *)3); + + // walks from current_task->next_task which must be valid + sentry__bgworker_submit_at(bgw, record_order_task, NULL, (void *)4, 0); + + sentry__mutex_lock(&blocker_lock); + blocker_released = true; + sentry__cond_wake(&blocker_signal); + sentry__mutex_unlock(&blocker_lock); + + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 5000), 0); + + TEST_CHECK_INT_EQUAL(os.count, 3); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + 
TEST_CHECK_INT_EQUAL(os.order[1], 4); + TEST_CHECK_INT_EQUAL(os.order[2], 3); + + sentry__bgworker_decref(bgw); +} + +SENTRY_TEST(bgworker_delayed_cleanup) +{ + int cleaned = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(NULL, NULL); + TEST_ASSERT(!!bgw); + + // immediate tasks (cleanup after execution) + sentry__bgworker_submit(bgw, noop_task, incr_cleanup, &cleaned); + sentry__bgworker_submit(bgw, noop_task, incr_cleanup, &cleaned); + + // far-future delayed tasks (discarded on shutdown, cleanup in decref) + sentry__bgworker_submit_at( + bgw, noop_task, incr_cleanup, &cleaned, UINT64_MAX); + sentry__bgworker_submit_at( + bgw, noop_task, incr_cleanup, &cleaned, UINT64_MAX); + sentry__bgworker_submit_at( + bgw, noop_task, incr_cleanup, &cleaned, UINT64_MAX); + + sentry__bgworker_start(bgw); + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 1000), 0); + sentry__bgworker_decref(bgw); + + TEST_CHECK_INT_EQUAL(cleaned, 5); +} + +SENTRY_TEST(bgworker_delayed_shutdown) +{ + struct order_state os; + os.count = 0; + + sentry_bgworker_t *bgw = sentry__bgworker_new(&os, NULL); + TEST_ASSERT(!!bgw); + + // immediate tasks + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)1); + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)2); + sentry__bgworker_submit(bgw, record_order_task, NULL, (void *)3); + + // pending delayed tasks are discarded on shutdown + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)4, UINT64_MAX); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)5, UINT64_MAX); + sentry__bgworker_submit_at( + bgw, record_order_task, NULL, (void *)6, UINT64_MAX); + + sentry__bgworker_start(bgw); + TEST_CHECK_INT_EQUAL(sentry__bgworker_shutdown(bgw, 1000), 0); + + TEST_CHECK_INT_EQUAL(os.count, 3); + TEST_CHECK_INT_EQUAL(os.order[0], 1); + TEST_CHECK_INT_EQUAL(os.order[1], 2); + TEST_CHECK_INT_EQUAL(os.order[2], 3); + + sentry__bgworker_decref(bgw); +} diff --git a/tests/unit/tests.inc 
b/tests/unit/tests.inc index 0a5cb2e9d..5642d247a 100644 --- a/tests/unit/tests.inc +++ b/tests/unit/tests.inc @@ -24,7 +24,17 @@ XX(basic_tracing_context) XX(basic_transaction) XX(basic_transport_thread_name) XX(basic_write_envelope_to_file) +XX(bgworker_delayed_cleanup) +XX(bgworker_delayed_current) +XX(bgworker_delayed_drop_current) +XX(bgworker_delayed_drop_next) +XX(bgworker_delayed_flush) +XX(bgworker_delayed_head) +XX(bgworker_delayed_priority) +XX(bgworker_delayed_shutdown) +XX(bgworker_delayed_tasks) XX(bgworker_flush) +XX(bgworker_task_delay) XX(breadcrumb_without_type_or_message_still_valid) XX(build_id_parser) XX(cache_keep)