@@ -372,4 +372,195 @@ template <typename T = std::shared_mutex, size_t SLOT_SIZE = 256, int SLOWDOWN_G
372372
373373using shared_mutex = shared_mutex_impl<>;
374374
375+ /* *
376+ ts::bravo::recursive_shared_mutex_impl
377+
378+ A recursive version of shared_mutex_impl that allows the same thread
379+ to acquire exclusive and shared locks multiple times.
380+
381+ Uses DenseThreadId for efficient per-thread state tracking without map overhead.
382+ Optimized to minimize expensive std::this_thread::get_id() calls by using
383+ DenseThreadId for ownership tracking.
384+
385+ Mixed lock semantics:
386+ - Upgrade prevention: A thread holding a shared lock cannot acquire an exclusive lock
387+ (would cause deadlock). try_lock() returns false, lock() asserts.
388+ - Downgrade allowed: A thread holding an exclusive lock can acquire a shared lock.
389+ */
390+ template <typename T = shared_mutex_impl<>, size_t SLOT_SIZE = 256 > class recursive_shared_mutex_impl
391+ {
392+ // Use a sentinel value for "no owner" - DenseThreadId values are 0 to SLOT_SIZE-1
393+ static constexpr size_t NO_OWNER = SLOT_SIZE;
394+
395+ public:
396+ recursive_shared_mutex_impl () = default ;
397+ ~recursive_shared_mutex_impl () = default ;
398+
399+ // No copying or moving
400+ recursive_shared_mutex_impl (recursive_shared_mutex_impl const &) = delete ;
401+ recursive_shared_mutex_impl &operator =(recursive_shared_mutex_impl const &) = delete ;
402+ recursive_shared_mutex_impl (recursive_shared_mutex_impl &&) = delete ;
403+ recursive_shared_mutex_impl &operator =(recursive_shared_mutex_impl &&) = delete ;
404+
405+ // //
406+ // Exclusive locking (recursive)
407+ //
408+ void
409+ lock ()
410+ {
411+ size_t tid = DenseThreadId::self ();
412+ // Fast path: check if we already own the lock
413+ if (_exclusive_owner.load (std::memory_order_relaxed) == tid) {
414+ ++_exclusive_count;
415+ return ;
416+ }
417+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
418+ ThreadState &state = _thread_states[tid];
419+ debug_assert (state.shared_count == 0 );
420+ _mutex.lock ();
421+ _exclusive_owner.store (tid, std::memory_order_relaxed);
422+ _exclusive_count = 1 ;
423+ }
424+
425+ bool
426+ try_lock ()
427+ {
428+ size_t tid = DenseThreadId::self ();
429+ // Fast path: check if we already own the lock
430+ if (_exclusive_owner.load (std::memory_order_relaxed) == tid) {
431+ ++_exclusive_count;
432+ return true ;
433+ }
434+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
435+ ThreadState &state = _thread_states[tid];
436+ if (state.shared_count > 0 ) {
437+ return false ;
438+ }
439+ if (_mutex.try_lock ()) {
440+ _exclusive_owner.store (tid, std::memory_order_relaxed);
441+ _exclusive_count = 1 ;
442+ return true ;
443+ }
444+ return false ;
445+ }
446+
447+ void
448+ unlock ()
449+ {
450+ if (--_exclusive_count == 0 ) {
451+ _exclusive_owner.store (NO_OWNER, std::memory_order_relaxed);
452+ _mutex.unlock ();
453+ }
454+ }
455+
456+ // //
457+ // Shared locking (recursive)
458+ //
459+ void
460+ lock_shared (Token &token)
461+ {
462+ size_t tid = DenseThreadId::self ();
463+ ThreadState &state = _thread_states[tid];
464+
465+ // Fast path: already holding shared lock - just increment count (most common case)
466+ size_t count = state.shared_count ;
467+ if (count > 0 ) {
468+ state.shared_count = count + 1 ;
469+ token = state.cached_token ;
470+ return ;
471+ }
472+
473+ // Check for downgrade: if we hold exclusive lock, allow shared lock without acquiring underlying
474+ if (_exclusive_owner.load (std::memory_order_relaxed) == tid) {
475+ state.shared_count = 1 ;
476+ token = 0 ; // Special token indicating we're under exclusive lock
477+ return ;
478+ }
479+
480+ // Slow path: acquire underlying lock
481+ _mutex.lock_shared (state.cached_token );
482+ state.shared_count = 1 ;
483+ token = state.cached_token ;
484+ }
485+
486+ bool
487+ try_lock_shared (Token &token)
488+ {
489+ size_t tid = DenseThreadId::self ();
490+ ThreadState &state = _thread_states[tid];
491+
492+ // Fast path: already holding shared lock - just increment count (most common case)
493+ size_t count = state.shared_count ;
494+ if (count > 0 ) {
495+ state.shared_count = count + 1 ;
496+ token = state.cached_token ;
497+ return true ;
498+ }
499+
500+ // Check for downgrade: if we hold exclusive lock, allow shared lock without acquiring underlying
501+ if (_exclusive_owner.load (std::memory_order_relaxed) == tid) {
502+ state.shared_count = 1 ;
503+ token = 0 ; // Special token indicating we're under exclusive lock
504+ return true ;
505+ }
506+
507+ // Slow path: try to acquire underlying lock
508+ if (_mutex.try_lock_shared (state.cached_token )) {
509+ state.shared_count = 1 ;
510+ token = state.cached_token ;
511+ return true ;
512+ }
513+ return false ;
514+ }
515+
516+ void
517+ unlock_shared (const Token /* token */ )
518+ {
519+ size_t tid = DenseThreadId::self ();
520+ ThreadState &state = _thread_states[tid];
521+ if (--state.shared_count == 0 ) {
522+ // Only unlock underlying mutex if we're not holding exclusive lock
523+ if (_exclusive_owner.load (std::memory_order_relaxed) != tid) {
524+ _mutex.unlock_shared (state.cached_token );
525+ }
526+ state.cached_token = 0 ;
527+ }
528+ }
529+
530+ // Extensions to check
531+ bool
532+ has_unique_lock ()
533+ {
534+ return _exclusive_owner.load (std::memory_order_relaxed) == DenseThreadId::self ();
535+ }
536+
537+ bool
538+ has_shared_lock ()
539+ {
540+ size_t tid = DenseThreadId::self ();
541+ ThreadState &state = _thread_states[tid];
542+
543+ if (state.shared_count > 0 ) {
544+ return true ;
545+ } else if (_exclusive_owner.load (std::memory_order_relaxed) == tid) {
546+ return true ;
547+ } else {
548+ return false ;
549+ }
550+ }
551+
552+ private:
553+ struct ThreadState {
554+ size_t shared_count{0 };
555+ Token cached_token{0 };
556+ };
557+
558+ T _mutex;
559+ std::atomic<size_t > _exclusive_owner{NO_OWNER};
560+ size_t _exclusive_count{0 };
561+ std::array<ThreadState, SLOT_SIZE> _thread_states{};
562+ };
563+
/// Convenience alias: recursive mutex over the default underlying shared_mutex_impl.
using recursive_shared_mutex = recursive_shared_mutex_impl<>;
565+
375566} // namespace ts::bravo
0 commit comments