diff --git a/src/sync.cpp b/src/sync.cpp
index 4be9b3b36..d3333327f 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -1,217 +1,235 @@
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <sync.h>

#include <logging.h>
#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/threadnames.h>

#include <map>
#include <memory>
#include <set>
+#include <system_error>

#ifdef DEBUG_LOCKCONTENTION
void PrintLockContention(const char *pszName, const char *pszFile, int nLine) {
    LogPrintf("LOCKCONTENTION: %s\n", pszName);
    LogPrintf("Locker: %s:%d\n", pszFile, nLine);
}
#endif /* DEBUG_LOCKCONTENTION */

#ifdef DEBUG_LOCKORDER
//
// Early deadlock detection.
// Problem being solved:
//    Thread 1 locks A, then B, then C
//    Thread 2 locks D, then C, then A
//     --> may result in deadlock between the two threads, depending on when
//     they run.
// Solution implemented here:
// Keep track of pairs of locks: (A before B), (A before C), etc.
// Complain if any thread tries to lock in a different order.
//

struct CLockLocation {
    CLockLocation(const char *pszName, const char *pszFile, int nLine,
                  bool fTryIn, const std::string &thread_name)
        : fTry(fTryIn), mutexName(pszName), sourceFile(pszFile),
          m_thread_name(thread_name), sourceLine(nLine) {}

    std::string ToString() const {
        return strprintf("%s %s:%s%s (in thread %s)", mutexName, sourceFile,
                         itostr(sourceLine), (fTry ? " (TRY)" : ""),
                         m_thread_name);
    }

+    std::string Name() const { return mutexName; }
+
private:
    bool fTry;
    std::string mutexName;
    std::string sourceFile;
    const std::string &m_thread_name;
    int sourceLine;
};

typedef std::vector<std::pair<void *, CLockLocation>> LockStack;
typedef std::map<std::pair<void *, void *>, LockStack> LockOrders;
typedef std::set<std::pair<void *, void *>> InvLockOrders;

struct LockData {
    // Very ugly hack: as the global constructs and destructors run single
    // threaded, we use this boolean to know whether LockData still exists,
    // as DeleteLock can get called by global RecursiveMutex destructors
    // after LockData disappears.
    bool available;
    LockData() : available(true) {}
    ~LockData() { available = false; }

    LockOrders lockorders;
    InvLockOrders invlockorders;
    std::mutex dd_mutex;
};

LockData &GetLockData() {
    static LockData lockdata;
    return lockdata;
}

static thread_local LockStack g_lockstack;

static void
potential_deadlock_detected(const std::pair<void *, void *> &mismatch,
                            const LockStack &s1, const LockStack &s2) {
    LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
    LogPrintf("Previous lock order was:\n");
    for (const std::pair<void *, CLockLocation> &i : s2) {
        if (i.first == mismatch.first) {
            LogPrintfToBeContinued(" (1)");
        }
        if (i.first == mismatch.second) {
            LogPrintfToBeContinued(" (2)");
        }
        LogPrintf(" %s\n", i.second.ToString());
    }
    LogPrintf("Current lock order is:\n");
    for (const std::pair<void *, CLockLocation> &i : s1) {
        if (i.first == mismatch.first) {
            LogPrintfToBeContinued(" (1)");
        }
        if (i.first == mismatch.second) {
            LogPrintfToBeContinued(" (2)");
        }
        LogPrintf(" %s\n", i.second.ToString());
    }
    if (g_debug_lockorder_abort) {
        tfm::format(
            std::cerr,
            "Assertion failed: detected inconsistent lock order at %s:%i, "
            "details in debug log.\n",
            __FILE__, __LINE__);
        abort();
    }
    throw std::logic_error("potential deadlock detected");
}

static void push_lock(void *c, const CLockLocation &locklocation) {
    LockData &lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

    g_lockstack.push_back(std::make_pair(c, locklocation));

    for (const std::pair<void *, CLockLocation> &i : g_lockstack) {
        if (i.first == c) {
            break;
        }

        std::pair<void *, void *> p1 = std::make_pair(i.first, c);
        if (lockdata.lockorders.count(p1)) {
            continue;
        }
        lockdata.lockorders.emplace(p1, g_lockstack);

        std::pair<void *, void *> p2 = std::make_pair(c, i.first);
        lockdata.invlockorders.insert(p2);
        if (lockdata.lockorders.count(p2)) {
            potential_deadlock_detected(p1, lockdata.lockorders[p2],
                                        lockdata.lockorders[p1]);
        }
    }
}

static void pop_lock() { g_lockstack.pop_back(); }

void EnterCritical(const char *pszName, const char *pszFile, int nLine,
                   void *cs, bool fTry) {
    push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry,
                                util::ThreadGetInternalName()));
}

+void CheckLastCritical(void *cs, std::string &lockname, const char *guardname,
+                       const char *file, int line) {
+    if (!g_lockstack.empty()) {
+        const auto &lastlock = g_lockstack.back();
+        if (lastlock.first == cs) {
+            lockname = lastlock.second.Name();
+            return;
+        }
+    }
+    throw std::system_error(
+        EPERM, std::generic_category(),
+        strprintf("%s:%s %s was not most recent critical section locked", file,
+                  line, guardname));
+}
+
void LeaveCritical() { pop_lock(); }

std::string LocksHeld() {
    std::string result;
    for (const std::pair<void *, CLockLocation> &i : g_lockstack) {
        result += i.second.ToString() + std::string("\n");
    }
    return result;
}

void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine,
                            void *cs) {
    for (const std::pair<void *, CLockLocation> &i : g_lockstack) {
        if (i.first == cs) {
            return;
        }
    }
    tfm::format(std::cerr,
                "Assertion failed: lock %s not held in %s:%i; locks held:\n%s",
                pszName, pszFile, nLine, LocksHeld());
    abort();
}

void AssertLockNotHeldInternal(const char *pszName, const char *pszFile,
                               int nLine, void *cs) {
    for (const std::pair<void *, CLockLocation> &i : g_lockstack) {
        if (i.first == cs) {
            tfm::format(
                std::cerr,
                "Assertion failed: lock %s held in %s:%i; locks held:\n%s",
                pszName, pszFile, nLine, LocksHeld());
            abort();
        }
    }
}

void DeleteLock(void *cs) {
    LockData &lockdata = GetLockData();
    if (!lockdata.available) {
        // We're already shutting down.
        return;
    }
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
    std::pair<void *, void *> item = std::make_pair(cs, nullptr);
    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
    while (it != lockdata.lockorders.end() && it->first.first == cs) {
        std::pair<void *, void *> invitem =
            std::make_pair(it->first.second, it->first.first);
        lockdata.invlockorders.erase(invitem);
        lockdata.lockorders.erase(it++);
    }
    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
        std::pair<void *, void *> invinvitem =
            std::make_pair(invit->second, invit->first);
        lockdata.lockorders.erase(invinvitem);
        lockdata.invlockorders.erase(invit++);
    }
}

bool g_debug_lockorder_abort = true;

#endif /* DEBUG_LOCKORDER */
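// Illustrative sketch, not part of the patch: the kind of lock-order inversion
// that the bookkeeping in push_lock() and potential_deadlock_detected() above
// catches under DEBUG_LOCKORDER. The mutexes and functions are hypothetical.
#include <sync.h>

RecursiveMutex g_mutex_a;
RecursiveMutex g_mutex_b;

void ThreadOne() {
    LOCK(g_mutex_a);
    LOCK(g_mutex_b); // records the pair (g_mutex_a before g_mutex_b)
}

void ThreadTwo() {
    LOCK(g_mutex_b);
    // If ThreadOne has already run, this registers the inverse pair
    // (g_mutex_b before g_mutex_a), so potential_deadlock_detected() logs both
    // lock stacks and aborts, or throws std::logic_error when
    // g_debug_lockorder_abort is false.
    LOCK(g_mutex_a);
}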
diff --git a/src/sync.h b/src/sync.h
index 73de30e43..b044294bb 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -1,312 +1,361 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SYNC_H
#define BITCOIN_SYNC_H

#include <threadsafety.h>
#include <util/macros.h>

#include <condition_variable>
#include <mutex>
#include <string>

/////////////////////////////////////////////////
//                                              //
// THE SIMPLE DEFINITION, EXCLUDING DEBUG CODE  //
//                                              //
/////////////////////////////////////////////////

/*
RecursiveMutex mutex;
    std::recursive_mutex mutex;

LOCK(mutex);
    std::unique_lock<std::recursive_mutex> criticalblock(mutex);

LOCK2(mutex1, mutex2);
    std::unique_lock<std::recursive_mutex> criticalblock1(mutex1);
    std::unique_lock<std::recursive_mutex> criticalblock2(mutex2);

TRY_LOCK(mutex, name);
    std::unique_lock<std::recursive_mutex> name(mutex, std::try_to_lock_t);

ENTER_CRITICAL_SECTION(mutex); // no RAII
    mutex.lock();

LEAVE_CRITICAL_SECTION(mutex); // no RAII
    mutex.unlock();
*/

///////////////////////////////
//                           //
// THE ACTUAL IMPLEMENTATION //
//                           //
///////////////////////////////

#ifdef DEBUG_LOCKORDER
void EnterCritical(const char *pszName, const char *pszFile, int nLine,
                   void *cs, bool fTry = false);
void LeaveCritical();
+void CheckLastCritical(void *cs, std::string &lockname, const char *guardname,
+                       const char *file, int line);
std::string LocksHeld();
void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine,
                            void *cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char *pszName, const char *pszFile,
                               int nLine, void *cs);
void DeleteLock(void *cs);

/**
 * Call abort() if a potential lock order deadlock bug is detected, instead of
 * just logging information and throwing a logic_error. Defaults to true, and
 * set to false in DEBUG_LOCKORDER unit tests.
 */
extern bool g_debug_lockorder_abort;
#else
static inline void EnterCritical(const char *pszName, const char *pszFile,
                                 int nLine, void *cs, bool fTry = false) {}
static inline void LeaveCritical() {}
+static inline void CheckLastCritical(void *cs, std::string &lockname,
+                                     const char *guardname, const char *file,
+                                     int line) {}
static inline void AssertLockHeldInternal(const char *pszName,
                                          const char *pszFile, int nLine,
                                          void *cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
static inline void AssertLockNotHeldInternal(const char *pszName,
                                             const char *pszFile, int nLine,
                                             void *cs) {}
static inline void DeleteLock(void *cs) {}
#endif

#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) \
    AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)

/**
 * Template mixin that adds -Wthread-safety locking annotations and lock order
 * checking to a subset of the mutex API.
 */
template <typename PARENT> class LOCKABLE AnnotatedMixin : public PARENT {
public:
    ~AnnotatedMixin() { DeleteLock((void *)this); }

    void lock() EXCLUSIVE_LOCK_FUNCTION() { PARENT::lock(); }

    void unlock() UNLOCK_FUNCTION() { PARENT::unlock(); }

    bool try_lock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
        return PARENT::try_lock();
    }

    using UniqueLock = std::unique_lock<PARENT>;
};

/**
 * Wrapped mutex: supports recursive locking, but no waiting
 * TODO: We should move away from using the recursive lock by default.
 */
using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>;

/** Wrapped mutex: supports waiting but not recursive locking */
typedef AnnotatedMixin<std::mutex> Mutex;

#ifdef DEBUG_LOCKCONTENTION
void PrintLockContention(const char *pszName, const char *pszFile, int nLine);
#endif

/** Wrapper around std::unique_lock style lock for Mutex. */
template <typename Mutex, typename Base = typename Mutex::UniqueLock>
class SCOPED_LOCKABLE UniqueLock : public Base {
private:
    void Enter(const char *pszName, const char *pszFile, int nLine) {
        EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex()));
#ifdef DEBUG_LOCKCONTENTION
        if (!Base::try_lock()) {
            PrintLockContention(pszName, pszFile, nLine);
#endif
            Base::lock();
#ifdef DEBUG_LOCKCONTENTION
        }
#endif
    }

    bool TryEnter(const char *pszName, const char *pszFile, int nLine) {
        EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex()), true);
        Base::try_lock();
        if (!Base::owns_lock()) {
            LeaveCritical();
        }
        return Base::owns_lock();
    }

public:
    UniqueLock(Mutex &mutexIn, const char *pszName, const char *pszFile,
               int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn)
        : Base(mutexIn, std::defer_lock) {
        if (fTry) {
            TryEnter(pszName, pszFile, nLine);
        } else {
            Enter(pszName, pszFile, nLine);
        }
    }

    UniqueLock(Mutex *pmutexIn, const char *pszName, const char *pszFile,
               int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn) {
        if (!pmutexIn) {
            return;
        }

        *static_cast<Base *>(this) = Base(*pmutexIn, std::defer_lock);
        if (fTry) {
            TryEnter(pszName, pszFile, nLine);
        } else {
            Enter(pszName, pszFile, nLine);
        }
    }

    ~UniqueLock() UNLOCK_FUNCTION() {
        if (Base::owns_lock()) {
            LeaveCritical();
        }
    }

    operator bool() { return Base::owns_lock(); }
+
+protected:
+    // needed for reverse_lock
+    UniqueLock() {}
+
+public:
+    /**
+     * An RAII-style reverse lock. Unlocks on construction and locks on
+     * destruction.
+     */
+    class reverse_lock {
+    public:
+        explicit reverse_lock(UniqueLock &_lock, const char *_guardname,
+                              const char *_file, int _line)
+            : lock(_lock), file(_file), line(_line) {
+            CheckLastCritical((void *)lock.mutex(), lockname, _guardname, _file,
+                              _line);
+            lock.unlock();
+            LeaveCritical();
+            lock.swap(templock);
+        }
+
+        ~reverse_lock() {
+            templock.swap(lock);
+            EnterCritical(lockname.c_str(), file.c_str(), line,
+                          (void *)lock.mutex());
+            lock.lock();
+        }
+
+    private:
+        reverse_lock(reverse_lock const &);
+        reverse_lock &operator=(reverse_lock const &);
+
+        UniqueLock &lock;
+        UniqueLock templock;
+        std::string lockname;
+        const std::string file;
+        const int line;
+    };
+    friend class reverse_lock;
};

+#define REVERSE_LOCK(g)                                                        \
+    decltype(g)::reverse_lock PASTE2(revlock, __COUNTER__)(g, #g, __FILE__,    \
+                                                           __LINE__)
+
template <typename MutexArg>
using DebugLock = UniqueLock<typename std::remove_reference<
    typename std::remove_pointer<MutexArg>::type>::type>;

#define LOCK(cs)                                                               \
    DebugLock<decltype(cs)> PASTE2(criticalblock,                              \
                                   __COUNTER__)(cs, #cs, __FILE__, __LINE__)
#define LOCK2(cs1, cs2)                                                        \
    DebugLock<decltype(cs1)> criticalblock1(cs1, #cs1, __FILE__, __LINE__);    \
    DebugLock<decltype(cs2)> criticalblock2(cs2, #cs2, __FILE__, __LINE__);
#define TRY_LOCK(cs, name)                                                     \
    DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__, true)
#define WAIT_LOCK(cs, name)                                                    \
    DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__)

#define ENTER_CRITICAL_SECTION(cs)                                             \
    {                                                                          \
        EnterCritical(#cs, __FILE__, __LINE__, (void *)(&cs));                 \
        (cs).lock();                                                           \
    }

#define LEAVE_CRITICAL_SECTION(cs)                                             \
    {                                                                          \
        (cs).unlock();                                                         \
        LeaveCritical();                                                       \
    }

//! Run code while locking a mutex.
//!
//! Examples:
//!
//!   WITH_LOCK(cs, shared_val = shared_val + 1);
//!
//!   int val = WITH_LOCK(cs, return shared_val);
//!
#define WITH_LOCK(cs, code)                                                    \
    [&] {                                                                      \
        LOCK(cs);                                                              \
        code;                                                                  \
    }()

class CSemaphore {
private:
    std::condition_variable condition;
    std::mutex mutex;
    int value;

public:
    explicit CSemaphore(int init) : value(init) {}

    void wait() {
        std::unique_lock<std::mutex> lock(mutex);
        condition.wait(lock, [&]() { return value >= 1; });
        value--;
    }

    bool try_wait() {
        std::lock_guard<std::mutex> lock(mutex);
        if (value < 1) {
            return false;
        }
        value--;
        return true;
    }

    void post() {
        {
            std::lock_guard<std::mutex> lock(mutex);
            value++;
        }
        condition.notify_one();
    }
};

/** RAII-style semaphore lock */
class CSemaphoreGrant {
private:
    CSemaphore *sem;
    bool fHaveGrant;

public:
    void Acquire() {
        if (fHaveGrant) {
            return;
        }
        sem->wait();
        fHaveGrant = true;
    }

    void Release() {
        if (!fHaveGrant) {
            return;
        }
        sem->post();
        fHaveGrant = false;
    }

    bool TryAcquire() {
        if (!fHaveGrant && sem->try_wait()) {
            fHaveGrant = true;
        }
        return fHaveGrant;
    }

    void MoveTo(CSemaphoreGrant &grant) {
        grant.Release();
        grant.sem = sem;
        grant.fHaveGrant = fHaveGrant;
        fHaveGrant = false;
    }

    CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {}

    explicit CSemaphoreGrant(CSemaphore &sema, bool fTry = false)
        : sem(&sema), fHaveGrant(false) {
        if (fTry) {
            TryAcquire();
        } else {
            Acquire();
        }
    }

    ~CSemaphoreGrant() { Release(); }

    operator bool() const { return fHaveGrant; }
};

// Utility class for indicating to compiler thread analysis that a mutex is
// locked (when it couldn't be determined otherwise).
struct SCOPED_LOCKABLE LockAssertion {
    template <typename Mutex>
    explicit LockAssertion(Mutex &mutex) EXCLUSIVE_LOCK_FUNCTION(mutex) {
#ifdef DEBUG_LOCKORDER
        AssertLockHeld(mutex);
#endif
    }

    ~LockAssertion() UNLOCK_FUNCTION() {}
};

#endif // BITCOIN_SYNC_H
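// Illustrative sketch, not part of the patch: typical use of the new
// REVERSE_LOCK macro defined above. It releases the most recently acquired
// lock for the remainder of the enclosing scope and re-acquires it when the
// scope ends. The mutex, guarded variable and helper are hypothetical.
#include <sync.h>

Mutex g_cache_mutex;
int g_cache_value GUARDED_BY(g_cache_mutex) = 0;

// Stand-in for work that must not run while holding g_cache_mutex.
int RecomputeSlowly() { return 42; }

void RefreshCache() {
    WAIT_LOCK(g_cache_mutex, lock);
    int fresh;
    {
        // g_cache_mutex is dropped here and taken again when this inner scope
        // ends.
        REVERSE_LOCK(lock);
        fresh = RecomputeSlowly();
    }
    g_cache_value = fresh;
}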
diff --git a/src/test/reverselock_tests.cpp b/src/test/reverselock_tests.cpp
index 2baaac82f..a555ccaf2 100644
--- a/src/test/reverselock_tests.cpp
+++ b/src/test/reverselock_tests.cpp
@@ -1,59 +1,88 @@
// Copyright (c) 2015-2019 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

-#include <reverselock.h>
+#include <sync.h>

#include <test/util/setup_common.h>

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(reverselock_tests, BasicTestingSetup)

BOOST_AUTO_TEST_CASE(reverselock_basics) {
-    boost::mutex mutex;
-    boost::unique_lock<boost::mutex> lock(mutex);
+    Mutex mutex;
+    WAIT_LOCK(mutex, lock);

    BOOST_CHECK(lock.owns_lock());
    {
-        reverse_lock<boost::unique_lock<boost::mutex>> rlock(lock);
+        REVERSE_LOCK(lock);
        BOOST_CHECK(!lock.owns_lock());
    }
    BOOST_CHECK(lock.owns_lock());
}

+BOOST_AUTO_TEST_CASE(reverselock_multiple) {
+    Mutex mutex2;
+    Mutex mutex;
+    WAIT_LOCK(mutex2, lock2);
+    WAIT_LOCK(mutex, lock);
+
+    // Make sure undoing two locks succeeds
+    {
+        REVERSE_LOCK(lock);
+        BOOST_CHECK(!lock.owns_lock());
+        REVERSE_LOCK(lock2);
+        BOOST_CHECK(!lock2.owns_lock());
+    }
+    BOOST_CHECK(lock.owns_lock());
+    BOOST_CHECK(lock2.owns_lock());
+}
+
BOOST_AUTO_TEST_CASE(reverselock_errors) {
-    boost::mutex mutex;
-    boost::unique_lock<boost::mutex> lock(mutex);
+    Mutex mutex2;
+    Mutex mutex;
+    WAIT_LOCK(mutex2, lock2);
+    WAIT_LOCK(mutex, lock);
+
+#ifdef DEBUG_LOCKORDER
+    // Make sure trying to reverse lock a previous lock fails
+    try {
+        REVERSE_LOCK(lock2);
+        BOOST_CHECK(false); // REVERSE_LOCK(lock2) succeeded
+    } catch (...) {
+    }
+    BOOST_CHECK(lock2.owns_lock());
+#endif

    // Make sure trying to reverse lock an unlocked lock fails
    lock.unlock();

    BOOST_CHECK(!lock.owns_lock());

    bool failed = false;
    try {
-        reverse_lock<boost::unique_lock<boost::mutex>> rlock(lock);
+        REVERSE_LOCK(lock);
    } catch (...) {
        failed = true;
    }

    BOOST_CHECK(failed);
    BOOST_CHECK(!lock.owns_lock());

    // Locking the original lock after it has been taken by a reverse lock
    // makes no sense. Ensure that the original lock no longer owns the lock
    // after giving it to a reverse one.

    lock.lock();
    BOOST_CHECK(lock.owns_lock());
    {
-        reverse_lock<boost::unique_lock<boost::mutex>> rlock(lock);
+        REVERSE_LOCK(lock);
        BOOST_CHECK(!lock.owns_lock());
    }

    BOOST_CHECK(failed);
    BOOST_CHECK(lock.owns_lock());
}

BOOST_AUTO_TEST_SUITE_END()
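// Illustrative sketch, not part of the patch: the error behaviour exercised by
// reverselock_errors above, as seen from calling code built with
// DEBUG_LOCKORDER. The mutexes and function are hypothetical.
#include <sync.h>

#include <system_error>

Mutex g_mutex_outer;
Mutex g_mutex_inner;

void ReverseInnermostFirst() {
    WAIT_LOCK(g_mutex_outer, outer);
    WAIT_LOCK(g_mutex_inner, inner);
    try {
        // 'outer' is not the most recently locked critical section, so
        // CheckLastCritical() throws std::system_error (EPERM) and 'outer'
        // stays locked.
        REVERSE_LOCK(outer);
    } catch (const std::system_error &) {
        // Reversing the innermost lock first is fine: g_mutex_inner is
        // released here and re-acquired when the catch block ends.
        REVERSE_LOCK(inner);
    }
}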