- an experimental read/write mutex... - 15 Updates
- Strong birthday problem simulated -- mismatch with known value of 3064 - 1 Update
- Fast mutex - 3 Updates
"Chris M. Thomasson" <invalid_chris_thomasson_invalid@invalid.com>: Feb 13 11:03PM -0800 Fwiw, here is an older read/write mutex of mine. I created a little benchmark for it vs c++17's std::shared_mutex. On MSVC 2017, my algorithm beats std::shared_mutex pretty badly. It takes my algorithm around 34 seconds to complete. MSVC's std::shared_mutex takes around 127 seconds. Wow! What a difference. Here is the code: Still have to try it out on GCC in c++17 mode. Can anybody run it? https://pastebin.com/raw/xCBHY9qd __________________________________ /* Simple, crude read/write mutex test by: Chris M. Thomasson __________________________________________*/ #include <thread> #include <atomic> #include <shared_mutex> #include <condition_variable> #include <iostream> #include <functional> #include <cassert> #include <cstdlib> #include <ctime> #define THREADS 16UL #define ITERS 10000000UL #define COUNT (THREADS * ITERS) // undefine to test std::shared_mutex #define CT_TEST_FAST_MUTEX 1 // bare bones mutex/condvar based semaphore struct ct_slow_semaphore { unsigned long m_state; std::mutex m_mutex; std::condition_variable m_cond; ct_slow_semaphore(unsigned long state) : m_state(state) {} void inc() { { std::unique_lock<std::mutex> lock(m_mutex); ++m_state; } m_cond.notify_one(); } void add(unsigned long addend) { { std::unique_lock<std::mutex> lock(m_mutex); m_state += addend; } m_cond.notify_all(); } void dec() { std::unique_lock<std::mutex> lock(m_mutex); while (m_state == 0) m_cond.wait(lock); --m_state; } }; // bin-sema struct ct_auto_reset_event { bool m_state; std::mutex m_mutex; std::condition_variable m_cond; ct_auto_reset_event() : m_state(false) {} void signal() { std::unique_lock<std::mutex> lock(m_mutex); m_state = true; m_cond.notify_one(); } void wait() { std::unique_lock<std::mutex> lock(m_mutex); while (m_state == false) m_cond.wait(lock); m_state = false; // auto-reset } }; // just a layer over an auto-reset event struct ct_fast_mutex { std::atomic<unsigned int> m_state; ct_auto_reset_event m_waitset; ct_fast_mutex() : m_state(0) {} void lock() { if (m_state.exchange(1, std::memory_order_acquire)) { while (m_state.exchange(2, std::memory_order_acquire)) { m_waitset.wait(); } } } void unlock() { if (m_state.exchange(0, std::memory_order_release) == 2) { m_waitset.signal(); } } }; // Chris M. Thomassons Experimental Read/Write Mutex // Yeah, it is pretty damn fat wrt the state, however // it has some interesting properties... // The state can be compressed a bit... // btw, it has no loops... // Take a look at the lock_shared and unlock_shared functions #define RWMUTEX_COUNT_MAX LONG_MAX struct ct_rwmutex { // shared state std::atomic<long> m_wrstate; std::atomic<long> m_count; std::atomic<long> m_rdwake; ct_slow_semaphore m_rdwset; ct_slow_semaphore m_wrwset; ct_fast_mutex m_wrlock; ct_rwmutex() : m_wrstate(1), m_count(RWMUTEX_COUNT_MAX), m_rdwake(0), m_rdwset(0), m_wrwset(0) { } // READ, pretty slim... 
    void lock_shared()
    {
        if (m_count.fetch_add(-1, std::memory_order_acquire) < 1)
        {
            m_rdwset.dec();
        }
    }

    void unlock_shared()
    {
        if (m_count.fetch_add(1, std::memory_order_release) < 0)
        {
            if (m_rdwake.fetch_add(-1, std::memory_order_acq_rel) == 1)
            {
                m_wrwset.inc();
            }
        }
    }

    // WRITE, more hefty
    void lock()
    {
        m_wrlock.lock();

        long count = m_count.fetch_add(-RWMUTEX_COUNT_MAX, std::memory_order_acquire);

        if (count < RWMUTEX_COUNT_MAX)
        {
            long rdwake = m_rdwake.fetch_add(RWMUTEX_COUNT_MAX - count, std::memory_order_acquire);

            if (rdwake + RWMUTEX_COUNT_MAX - count)
            {
                m_wrwset.dec();
            }
        }
    }

    // write unlock
    void unlock()
    {
        long count = m_count.fetch_add(RWMUTEX_COUNT_MAX, std::memory_order_release);

        if (count < 0)
        {
            m_rdwset.add(-count);
        }

        m_wrlock.unlock();
    }
};

struct ct_shared
{
    std::atomic<unsigned long> m_state;

#if defined (CT_TEST_FAST_MUTEX)
    ct_rwmutex m_std_rwmutex;
#else
    std::shared_mutex m_std_rwmutex;
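For a quick sanity check of the interface, here is a minimal usage sketch, assuming the ct_rwmutex definition above is in scope; the reader/writer split, the iteration counts, and the g_rwmutex / g_shared_value names are illustrative only and are not part of the original benchmark.

    #include <thread>
    #include <vector>
    #include <cstdio>

    // assumes ct_rwmutex (and its helper types) from the post above
    static ct_rwmutex g_rwmutex;
    static unsigned long g_shared_value = 0; // protected by g_rwmutex

    void reader(unsigned long iters)
    {
        for (unsigned long i = 0; i < iters; ++i)
        {
            g_rwmutex.lock_shared();              // many readers may hold this at once
            unsigned long snapshot = g_shared_value;
            (void)snapshot;
            g_rwmutex.unlock_shared();
        }
    }

    void writer(unsigned long iters)
    {
        for (unsigned long i = 0; i < iters; ++i)
        {
            g_rwmutex.lock();                     // exclusive access
            ++g_shared_value;
            g_rwmutex.unlock();
        }
    }

    int main()
    {
        std::vector<std::thread> threads;
        for (unsigned long i = 0; i < 7; ++i) threads.emplace_back(reader, 100000UL);
        threads.emplace_back(writer, 100000UL);
        for (std::thread& t : threads) t.join();
        std::printf("g_shared_value = %lu\n", g_shared_value);
        return 0;
    }

Note that readers only touch m_count on their fast path, while a writer first serializes through m_wrlock, so at most one writer at a time ever contends with the readers over the shared count.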