diff --git a/src/rcu.cpp b/src/rcu.cpp
index b7b92830a..3ee3899ec 100644
--- a/src/rcu.cpp
+++ b/src/rcu.cpp
@@ -1,233 +1,252 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <rcu.h>
#include <sync.h>

#include <algorithm>
#include <chrono>
#include <condition_variable>

std::atomic<uint64_t> RCUInfos::revision{0};

thread_local RCUInfos RCUInfos::infos{};

/**
 * How many times a busy loop runs before yielding.
 */
static constexpr int RCU_ACTIVE_LOOP_COUNT = 10;

/**
 * We maintain a linked list of all the RCUInfos for each active thread. Upon
 * start, a new thread adds itself to the head of the linked list and the node
 * is then removed when the thread shuts down.
 *
 * Insertion is fairly straightforward. The first step is to set the next
 * pointer of the node being inserted to the first node in the list, as
 * follows:
 *
 *    threadInfos -> Node -> ...
 *                    ^
 *         Nadded ----|
 *
 * The second step is to update threadInfos to point to the inserted node.
 * This is done using compare and swap. If the head of the list changed during
 * this process (for instance, due to another insertion), the CAS will fail
 * and we can start again.
 *
 *    threadInfos    Node -> ...
 *        |           ^
 *        \-> Nadded -|
 *
 * Deletion is a slightly more complex process. The general idea is to go over
 * the list, find the parent of the item we want to remove, and set its next
 * pointer to jump over it.
 *
 *    Nparent -> Ndelete -> Nchild
 *
 *    Nparent    Ndelete -> Nchild
 *       |                   ^
 *       \-------------------|
 *
 * We run into problems when a node is deleted concurrently with another node
 * being inserted. Fortunately, we can solve that problem with CAS as well.
 *
 *    threadInfos -> Ndelete -> Nchild
 *                    ^
 *         Nadded ----|
 *
 * The insertion will try to update threadInfos to point to Nadded, while the
 * deletion will try to update it to point to Nchild. Whichever goes first
 * will cause the other to fail its CAS and restart its process.
 *
 *    threadInfos    Ndelete -> Nchild
 *        |           ^
 *        \-> Nadded -|
 *
 * After a successful insertion, threadInfos now points to Nadded, and the CAS
 * to move it to Nchild will fail, causing the deletion process to restart
 * from scratch.
 *
 *        /---------------------|
 *        |                     V
 *    threadInfos    Ndelete -> Nchild
 *                    ^
 *         Nadded ----|
 *
 * After a successful deletion, threadInfos now points to Nchild, and the CAS
 * to move it to Nadded will fail, causing the insertion process to start
 * over.
 *
 * We also run into problems when several nodes are deleted concurrently.
 * Because it is not possible to read Ndelete->next and update Nparent->next
 * atomically, we may end up setting Nparent->next to a stale value if Nchild
 * is deleted.
 *
 *        /---------------------|
 *        |                     V
 *    Nparent    Ndelete    Nchild -> Ngrandchild
 *       |                   ^
 *       \-------------------|
 *
 * This would cause Nchild to be 'resurrected', which is obviously a problem.
 * To avoid this, we make sure that no concurrent deletion takes place using a
 * good old mutex. Using a mutex for deletion also ensures we are safe from
 * the ABA problem.
 *
 * Once a node is deleted from the list, we cannot destroy it right away.
 * Readers do not hold the mutex and may still be using that node. We need to
 * leverage RCU to make sure all the readers have finished their work before
 * allowing the node to be destroyed. We need to keep holding the mutex during
 * that process, because we just removed our thread from the list of threads
 * to wait for. A concurrent deletion would not wait for us and may end up
 * deleting data we rely on as a result.
 */
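The insertion half of this scheme is the classic lock-free stack push. A minimal standalone sketch of the two steps and the CAS retry loop, with a hypothetical `Node` type standing in for RCUInfos:

#include <atomic>

struct Node {
    std::atomic<Node *> next{nullptr};
};

static std::atomic<Node *> listHead{nullptr};

void insert(Node *nadded) {
    Node *head = listHead.load();
    do {
        // Step 1: point the new node at the current head.
        nadded->next.store(head);
        // Step 2: swing the head to the new node. If the head changed in
        // the meantime, `head` is reloaded and we retry from step 1.
    } while (!listHead.compare_exchange_weak(head, nadded));
}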
static std::atomic<RCUInfos *> threadInfos{nullptr};
static RecursiveMutex csThreadInfosDelete;

RCUInfos::RCUInfos() : state(0), next(nullptr) {
    RCUInfos *head = threadInfos.load();
    do {
        next.store(head);
    } while (!threadInfos.compare_exchange_weak(head, this));

    // Release the lock.
    readFree();
}

RCUInfos::~RCUInfos() {
    /**
     * Before the thread is removed from the list, make sure we clean up
     * everything.
     */
    runCleanups();
    while (cleanups.size() > 0) {
        synchronize();
    }

    while (true) {
        LOCK(csThreadInfosDelete);

        std::atomic<RCUInfos *> *ptr;

        {
            RCULock lock(this);
            ptr = &threadInfos;
            while (true) {
                RCUInfos *current = ptr->load();
                if (current == this) {
                    break;
                }

                assert(current != nullptr);
                ptr = &current->next;
            }
        }

        /**
         * We have our node and the parent is ready to be updated.
         * NB: The CAS operation only checks for *ptr and not for next. This
         * would be a big problem in the general case, but because we only
         * insert at the tip of the list and cannot have concurrent deletions
         * thanks to the use of a mutex, we are safe.
         */
        RCUInfos *current = this;
        if (!ptr->compare_exchange_strong(current, next.load())) {
            continue;
        }

        /**
         * We now wait for possible readers to go past the synchronization
         * point. We need to do so while holding the lock, as this operation
         * requires us to be a reader, but we just removed ourselves from the
         * list of readers to check and may therefore not be waited for.
         */
        synchronize();
        break;
    }
}

void RCUInfos::synchronize() {
    uint64_t syncRev = ++revision;

    // Loop a few times lock-free.
    for (int i = 0; i < RCU_ACTIVE_LOOP_COUNT; i++) {
        runCleanups();
        if (cleanups.empty() && hasSyncedTo(syncRev)) {
            return;
        }
    }

    // It seems like we have some contention. Let's try not to starve the
    // system and make sure threads that land here proceed one by one.
    // XXX: The best option long term is most likely to use a futex on one of
    // the threads causing the synchronization delay, so this thread can be
    // woken up at an appropriate time.
    static std::condition_variable cond;
    static Mutex cs;
    WAIT_LOCK(cs, lock);

    do {
        runCleanups();
        cond.notify_one();
    } while (!cond.wait_for(lock, std::chrono::microseconds(1), [&] {
        return cleanups.empty() && hasSyncedTo(syncRev);
    }));
}

+class RCUInfos::RCUCleanupGuard {
+    RCUInfos *infos;
+
+public:
+    explicit RCUCleanupGuard(RCUInfos *infosIn) : infos(infosIn) {
+        infos->isCleaningUp = true;
+    }
+
+    ~RCUCleanupGuard() { infos->isCleaningUp = false; }
+};
+
void RCUInfos::runCleanups() {
+    if (isCleaningUp || cleanups.empty()) {
+        // We don't want to run cleanups within cleanups.
+        return;
+    }
+
+    RCUCleanupGuard guard(this);
+
    // By the time we run a set of cleanups, we may have more cleanups
    // available, so we loop until there is nothing available for cleanup.
-    while (true) {
-        if (cleanups.empty()) {
-            // There is nothing to cleanup.
+    while (!cleanups.empty()) {
+        auto it = cleanups.begin();
+        uint64_t syncedTo = hasSyncedTo(it->first);
+        if (it->first > syncedTo) {
+            // We have nothing more ready to be cleaned up.
            return;
        }

-        auto it = cleanups.begin();
-        uint64_t syncedTo = hasSyncedTo(it->first);
        while (it != cleanups.end() && it->first <= syncedTo) {
            // Run the cleanup and remove it from the map.
-            it->second();
+            auto fun = std::move(it->second);
            cleanups.erase(it++);
+            fun();
        }
    }
}
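For context, the write-side pattern these primitives support looks roughly like this. `Config`, `g_config` and `updateConfig` are hypothetical names used purely for illustration, not part of this change:

// Sketch only: publish a new version of some shared state, then defer
// destruction of the old one until every thread has passed a
// synchronization point.
struct Config { int value = 0; };
static std::atomic<Config *> g_config{new Config};

void updateConfig(Config *newConfig) {
    Config *oldConfig = g_config.exchange(newConfig);
    // Readers may still be using oldConfig; hand it to RCU instead of
    // deleting it immediately.
    RCULock::registerCleanup([oldConfig] { delete oldConfig; });
}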
uint64_t RCUInfos::hasSyncedTo(uint64_t cutoff) {
    uint64_t syncedTo = revision.load();

    // Go over the list and check that all threads are past the
    // synchronization point.
    RCULock lock(this);
    RCUInfos *current = threadInfos.load();
    while (current != nullptr) {
        syncedTo = std::min(syncedTo, current->state.load());
        if (syncedTo < cutoff) {
            return 0;
        }

        current = current->next.load();
    }

    return syncedTo;
}
diff --git a/src/rcu.h b/src/rcu.h
index fbd425866..cf6431963 100644
--- a/src/rcu.h
+++ b/src/rcu.h
@@ -1,234 +1,240 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_RCU_H
#define BITCOIN_RCU_H

#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <ostream>
#include <type_traits>
#include <utility>

class RCUInfos;
class RCUReadLock;

class RCUInfos {
    std::atomic<uint64_t> state;
    std::atomic<RCUInfos *> next;

+    bool isCleaningUp = false;
+    class RCUCleanupGuard;
+
    std::map<uint64_t, std::function<void()>> cleanups;

    // The largest revision possible means unlocked.
    static const uint64_t UNLOCKED = -uint64_t(1);

    RCUInfos();
    ~RCUInfos();

    void readLock() {
        assert(!isLocked());
        state.store(revision.load());
    }

    void readFree() {
        assert(isLocked());
        state.store(UNLOCKED);
    }

    bool isLocked() const { return state.load() != UNLOCKED; }
    void registerCleanup(const std::function<void()> &f) {
        cleanups.emplace(++revision, f);
    }

    void synchronize();
    void runCleanups();
    uint64_t hasSyncedTo(uint64_t cutoff = UNLOCKED);

    friend class RCULock;
    friend struct RCUTest;

    static std::atomic<uint64_t> revision;
    static thread_local RCUInfos infos;
};

class RCULock {
    RCUInfos *infos;

    explicit RCULock(RCUInfos *infosIn) : infos(infosIn) { infos->readLock(); }
    friend class RCUInfos;

public:
    RCULock() : RCULock(&RCUInfos::infos) {}
-    ~RCULock() { infos->readFree(); }
+    ~RCULock() {
+        infos->readFree();
+        infos->runCleanups();
+    }

    RCULock(const RCULock &) = delete;
    RCULock &operator=(const RCULock &) = delete;

    static bool isLocked() { return RCUInfos::infos.isLocked(); }
    static void registerCleanup(const std::function<void()> &f) {
        RCUInfos::infos.registerCleanup(f);
    }

    static void synchronize() { RCUInfos::infos.synchronize(); }
};
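And the read side of the same hypothetical `g_config` example from above: while an RCULock is in scope, a concurrent synchronize() cannot return, so a pointer loaded inside the read section stays valid until the lock is released:

int readConfigValue() {
    // Enter the read-side critical section; the destructor ends it and,
    // with this change, also runs any cleanups that are ready.
    RCULock lock;
    return g_config.load()->value;
}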
template <typename T> class RCUPtr {
    T *ptr;

    // Private construction, so factories have to be used.
    explicit RCUPtr(T *ptrIn) : ptr(ptrIn) {}

public:
    RCUPtr() : ptr(nullptr) {}

    ~RCUPtr() {
        if (ptr != nullptr) {
            ptr->decrementRefCount();
        }
    }

    /**
     * Acquire ownership of some pointer.
     */
    static RCUPtr acquire(T *&ptrIn) {
        RCUPtr ret(ptrIn);
        ptrIn = nullptr;
        return ret;
    }

    /**
     * Construct a new object that is owned by the pointer.
     */
    template <typename... Args> static RCUPtr make(Args &&...args) {
        return RCUPtr(new T(std::forward<Args>(args)...));
    }

    /**
     * Construct a new RCUPtr without transferring ownership.
     */
    static RCUPtr copy(T *ptr) {
        if (ptr != nullptr) {
            ptr->incrementRefCount();
        }

        return RCUPtr::acquire(ptr);
    }

    /**
     * Copy semantic.
     */
    RCUPtr(const RCUPtr &src) : ptr(src.ptr) {
        if (ptr != nullptr) {
            ptr->incrementRefCount();
        }
    }

    RCUPtr &operator=(const RCUPtr &rhs) {
        RCUPtr tmp(rhs);
        std::swap(ptr, tmp.ptr);
        return *this;
    }

    /**
     * Move semantic.
     */
    RCUPtr(RCUPtr &&src) : RCUPtr() { std::swap(ptr, src.ptr); }
    RCUPtr &operator=(RCUPtr &&rhs) {
        std::swap(ptr, rhs.ptr);
        return *this;
    }

    /**
     * Get allows access to the underlying pointer. RCUPtr keeps ownership.
     */
    T *get() { return ptr; }
    const T *get() const { return ptr; }

    /**
     * Release transfers ownership of the pointer from RCUPtr to the caller.
     */
    T *release() {
        T *oldPtr = ptr;
        ptr = nullptr;
        return oldPtr;
    }

    /**
     * Operator overloading for convenience.
     */
    T *operator->() { return ptr; }
    const T *operator->() const { return ptr; }

    T &operator*() { return *ptr; }
    const T &operator*() const { return *ptr; }

    explicit operator bool() const { return ptr != nullptr; }

    /**
     * Equality checks.
     */
    friend bool operator==(const RCUPtr &lhs, const T *rhs) {
        return lhs.get() == rhs;
    }

    friend bool operator==(const RCUPtr &lhs, const RCUPtr &rhs) {
        return lhs == rhs.get();
    }

    friend bool operator!=(const RCUPtr &lhs, const T *rhs) {
        return !(lhs == rhs);
    }

    friend bool operator!=(const RCUPtr &lhs, const RCUPtr &rhs) {
        return !(lhs == rhs);
    }

    /**
     * ostream support.
     */
    friend std::ostream &operator<<(std::ostream &stream, const RCUPtr &rhs) {
        return stream << rhs.ptr;
    }
};

#define IMPLEMENT_RCU_REFCOUNT(T)                                              \
private:                                                                       \
    mutable std::atomic<T> refcount{0};                                        \
                                                                               \
    void incrementRefCount() const { refcount++; }                             \
                                                                               \
    bool tryDecrement() const {                                                \
        T count = refcount.load();                                             \
        while (count > 0) {                                                    \
            if (refcount.compare_exchange_weak(count, count - 1)) {            \
                return true;                                                   \
            }                                                                  \
        }                                                                      \
                                                                               \
        return false;                                                          \
    }                                                                          \
                                                                               \
    void decrementRefCount() const {                                           \
        if (tryDecrement()) {                                                  \
            return;                                                            \
        }                                                                      \
                                                                               \
        RCULock::registerCleanup([this] {                                      \
            if (tryDecrement()) {                                              \
                return;                                                        \
            }                                                                  \
                                                                               \
            delete this;                                                       \
        });                                                                    \
    }                                                                          \
                                                                               \
    static_assert(std::is_integral<T>::value, "T must be an integral type.");  \
    static_assert(std::is_unsigned<T>::value, "T must be unsigned.");          \
                                                                               \
    template <typename> friend class ::RCUPtr

#endif // BITCOIN_RCU_H
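For illustration, a type opts into this reference counting scheme roughly as follows. `Widget` is a hypothetical stand-in; the tests below use RCURefTestItem in the same way:

class Widget {
    IMPLEMENT_RCU_REFCOUNT(uint32_t);

public:
    int n = 0;
};

void example() {
    auto p = RCUPtr<Widget>::make(); // Refcount starts at 0, owned by p.
    auto q = p;                      // Copying bumps the refcount.
    // When the last RCUPtr drops the final reference, deletion is
    // registered as an RCU cleanup rather than performed immediately.
}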
diff --git a/src/test/rcu_tests.cpp b/src/test/rcu_tests.cpp
index 23a5bd11e..eb74214d7 100644
--- a/src/test/rcu_tests.cpp
+++ b/src/test/rcu_tests.cpp
@@ -1,392 +1,424 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <rcu.h>
#include <test/setup_common.h>

#include <boost/test/unit_test.hpp>

#include <thread>

struct RCUTest {
    static uint64_t getRevision() { return RCUInfos::revision.load(); }

    static uint64_t hasSyncedTo(uint64_t syncRev) {
        return RCUInfos::infos.hasSyncedTo(syncRev);
    }

    static std::map<uint64_t, std::function<void()>> &getCleanups() {
        return RCUInfos::infos.cleanups;
    }
};

BOOST_FIXTURE_TEST_SUITE(rcu_tests, BasicTestingSetup)

enum RCUTestStep {
    Init,
    Locked,
    LockAck,
    RCULocked,
    Synchronizing,
    Synchronized,
};

#define WAIT_FOR_STEP(step)                                                    \
    do {                                                                       \
        cond.notify_all();                                                     \
    } while (!cond.wait_for(lock, std::chrono::milliseconds(1),               \
                            [&] { return otherstep == step; }))

void synchronize(std::atomic<RCUTestStep> &step,
                 const std::atomic<RCUTestStep> &otherstep, Mutex &cs,
                 std::condition_variable &cond,
                 std::atomic<uint64_t> &syncRev) {
    assert(step == RCUTestStep::Init);

    {
        WAIT_LOCK(cs, lock);
        step = RCUTestStep::Locked;

        // Wait for our lock to be acknowledged.
        WAIT_FOR_STEP(RCUTestStep::LockAck);

        RCULock rculock;

        // Update step.
        step = RCUTestStep::RCULocked;

        // Wait for master.
        WAIT_FOR_STEP(RCUTestStep::RCULocked);
    }

    // Update step.
    syncRev = RCUTest::getRevision() + 1;
    step = RCUTestStep::Synchronizing;

    assert(!RCUTest::hasSyncedTo(syncRev));

    // We wait for readers.
    RCULock::synchronize();

    // Update step.
    step = RCUTestStep::Synchronized;
}

void lockAndWaitForSynchronize(std::atomic<RCUTestStep> &step,
                               const std::atomic<RCUTestStep> &otherstep,
                               Mutex &cs, std::condition_variable &cond,
                               std::atomic<uint64_t> &syncRev) {
    assert(step == RCUTestStep::Init);
    WAIT_LOCK(cs, lock);

    // Wait for the other thread to be locked.
    WAIT_FOR_STEP(RCUTestStep::Locked);
    step = RCUTestStep::LockAck;

    // Wait for the synchronizing thread to take its RCU lock.
    WAIT_FOR_STEP(RCUTestStep::RCULocked);
    assert(!RCUTest::hasSyncedTo(syncRev));

    {
        RCULock rculock;

        // Update master step.
        step = RCUTestStep::RCULocked;

        while (RCUTest::getRevision() < syncRev) {
            WAIT_FOR_STEP(RCUTestStep::Synchronizing);
        }

        assert(RCUTest::getRevision() >= syncRev);
        assert(otherstep.load() == RCUTestStep::Synchronizing);
    }

    assert(RCUTest::hasSyncedTo(syncRev) >= syncRev);
    WAIT_FOR_STEP(RCUTestStep::Synchronized);
}

static const int COUNT = 128;

BOOST_AUTO_TEST_CASE(synchronize_test) {
    Mutex cs;
    std::condition_variable cond;
    std::atomic<RCUTestStep> parentstep;
    std::atomic<RCUTestStep> childstep;
    std::atomic<uint64_t> syncRev;

    for (int i = 0; i < COUNT; i++) {
        parentstep = RCUTestStep::Init;
        childstep = RCUTestStep::Init;
        syncRev = RCUTest::getRevision() + 1;

        std::thread tlock([&] {
            lockAndWaitForSynchronize(parentstep, childstep, cs, cond,
                                      syncRev);
        });

        std::thread tsync(
            [&] { synchronize(childstep, parentstep, cs, cond, syncRev); });

        tlock.join();
        tsync.join();
    }

    // Needed to suppress "Test case [...] did not check any assertions"
    BOOST_CHECK(true);
}

-BOOST_AUTO_TEST_CASE(cleanup_test) {
+BOOST_AUTO_TEST_CASE(cleanup_simple) {
    RCULock::synchronize();
    BOOST_CHECK(RCUTest::getCleanups().empty());

    bool isClean1 = false;
    RCULock::registerCleanup([&] { isClean1 = true; });

    BOOST_CHECK(!isClean1);
    BOOST_CHECK_EQUAL(RCUTest::getCleanups().size(), 1);
-    BOOST_CHECK_EQUAL(RCUTest::getRevision(),
-                      RCUTest::getCleanups().begin()->first);
+
+    auto revision = RCUTest::getCleanups().begin()->first;
+    BOOST_CHECK_EQUAL(RCUTest::getRevision(), revision);

    // Synchronize runs the cleanups.
    RCULock::synchronize();
    BOOST_CHECK(RCUTest::getCleanups().empty());
    BOOST_CHECK(isClean1);
+}

+BOOST_AUTO_TEST_CASE(cleanup_multiple) {
    // Check multiple callbacks.
-    isClean1 = false;
+    bool isClean1 = false;
    bool isClean2 = false;
    bool isClean3 = false;
    RCULock::registerCleanup([&] { isClean1 = true; });
    RCULock::registerCleanup([&] { isClean2 = true; });
    RCULock::registerCleanup([&] { isClean3 = true; });

    BOOST_CHECK_EQUAL(RCUTest::getCleanups().size(), 3);
    RCULock::synchronize();
    BOOST_CHECK(RCUTest::getCleanups().empty());
    BOOST_CHECK(isClean1);
    BOOST_CHECK(isClean2);
    BOOST_CHECK(isClean3);
+}

+BOOST_AUTO_TEST_CASE(cleanup_test_nested) {
    // Check callbacks adding each other.
-    isClean1 = false;
-    isClean2 = false;
-    isClean3 = false;
+    bool isClean1 = false;
+    bool isClean2 = false;
+    bool isClean3 = false;
    RCULock::registerCleanup([&] {
        isClean1 = true;
        RCULock::registerCleanup([&] {
            isClean2 = true;
            RCULock::registerCleanup([&] { isClean3 = true; });
        });
    });

    BOOST_CHECK_EQUAL(RCUTest::getCleanups().size(), 1);
    RCULock::synchronize();
    BOOST_CHECK(RCUTest::getCleanups().empty());
    BOOST_CHECK(isClean1);
    BOOST_CHECK(isClean2);
    BOOST_CHECK(isClean3);
}
+BOOST_AUTO_TEST_CASE(cleanup_on_unlock) {
+    // Check callbacks adding each other.
+    bool isClean1 = false;
+    bool isClean2 = false;
+    bool isClean3 = false;
+
+    RCULock::registerCleanup([&] {
+        isClean1 = true;
+        RCULock::registerCleanup([&] {
+            isClean2 = true;
+            RCULock::registerCleanup([&] { isClean3 = true; });
+        });
+    });
+
+    BOOST_CHECK_EQUAL(RCUTest::getCleanups().size(), 1);
+
+    {
+        // There is no contention, so this will run the cleanups.
+        RCULock lock;
+    }
+
+    BOOST_CHECK(RCUTest::getCleanups().empty());
+    BOOST_CHECK(isClean1);
+    BOOST_CHECK(isClean2);
+    BOOST_CHECK(isClean3);
+}
+
class RCURefTestItem {
    IMPLEMENT_RCU_REFCOUNT(uint32_t);
    const std::function<void()> cleanupfun;

public:
    explicit RCURefTestItem(const std::function<void()> &fun)
        : cleanupfun(fun) {}
    ~RCURefTestItem() { cleanupfun(); }

    uint32_t getRefCount() const { return refcount.load(); }
};

BOOST_AUTO_TEST_CASE(rcuptr_test) {
    // Make sure it works for null.
    {
        RCURefTestItem *ptr = nullptr;
        RCUPtr<RCURefTestItem>::copy(ptr);
        RCUPtr<RCURefTestItem>::acquire(ptr);
    }

    // Check the destruction mechanism.
    bool isDestroyed = false;

    {
        auto rcuptr = RCUPtr<RCURefTestItem>::make([&] { isDestroyed = true; });
        BOOST_CHECK_EQUAL(rcuptr->getRefCount(), 0);
    }

    // rcuptr waits for synchronization to destroy.
    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);

    // Check that copy behaves properly.
    isDestroyed = false;
    RCUPtr<RCURefTestItem> gptr;

    {
        auto rcuptr = RCUPtr<RCURefTestItem>::make([&] { isDestroyed = true; });
        BOOST_CHECK_EQUAL(rcuptr->getRefCount(), 0);

        gptr = rcuptr;
        BOOST_CHECK_EQUAL(rcuptr->getRefCount(), 1);
        BOOST_CHECK_EQUAL(gptr->getRefCount(), 1);

        auto rcuptrcopy = rcuptr;
        BOOST_CHECK_EQUAL(rcuptrcopy->getRefCount(), 2);
        BOOST_CHECK_EQUAL(rcuptr->getRefCount(), 2);
        BOOST_CHECK_EQUAL(gptr->getRefCount(), 2);
    }

    BOOST_CHECK_EQUAL(gptr->getRefCount(), 0);
    RCULock::synchronize();
    BOOST_CHECK(!isDestroyed);

    gptr = RCUPtr<RCURefTestItem>();
    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);
}

BOOST_AUTO_TEST_CASE(rcuptr_operator_test) {
    auto gptr = RCUPtr<RCURefTestItem>();
    auto ptr = new RCURefTestItem([] {});
    auto oldPtr = ptr;

    auto altptr = RCUPtr<RCURefTestItem>::make([] {});

    // Check various operators.
    BOOST_CHECK_EQUAL(gptr.get(), NULLPTR(RCURefTestItem));
    BOOST_CHECK_EQUAL(gptr, NULLPTR(RCURefTestItem));
    BOOST_CHECK(!gptr);

    auto copyptr = gptr;
    BOOST_CHECK(gptr == nullptr);
    BOOST_CHECK(gptr != oldPtr);
    BOOST_CHECK(gptr == copyptr);
    BOOST_CHECK(gptr != altptr);

    gptr = RCUPtr<RCURefTestItem>::acquire(ptr);
    BOOST_CHECK_EQUAL(ptr, NULLPTR(RCURefTestItem));
    BOOST_CHECK_EQUAL(gptr.get(), oldPtr);
    BOOST_CHECK_EQUAL(&*gptr, oldPtr);
    BOOST_CHECK_EQUAL(gptr, oldPtr);
    BOOST_CHECK(gptr);

    copyptr = gptr;
    BOOST_CHECK(gptr != nullptr);
    BOOST_CHECK(gptr == oldPtr);
    BOOST_CHECK(gptr == copyptr);
    BOOST_CHECK(gptr != altptr);
}

BOOST_AUTO_TEST_CASE(const_rcuptr_test) {
    bool isDestroyed = false;
    auto ptr =
        RCUPtr<const RCURefTestItem>::make([&] { isDestroyed = true; });

    // Now let's destroy it.
    ptr = RCUPtr<const RCURefTestItem>();
    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);
}

class RCURefMoveTestItem {
    const std::function<void()> cleanupfun;

public:
    explicit RCURefMoveTestItem(const std::function<void()> &fun)
        : cleanupfun(fun) {}
    ~RCURefMoveTestItem() { cleanupfun(); }

    void incrementRefCount() {
        throw std::runtime_error("RCUPtr incremented the refcount");
    }
    void decrementRefCount() {
        RCULock::registerCleanup([this] { delete this; });
    }
};

BOOST_AUTO_TEST_CASE(move_rcuptr_test) {
    bool isDestroyed = false;

    // Check that copying fails.
    auto rcuptr1 =
        RCUPtr<RCURefMoveTestItem>::make([&] { isDestroyed = true; });
    BOOST_CHECK_THROW(rcuptr1->incrementRefCount(), std::runtime_error);
    BOOST_CHECK_THROW(auto rcuptrcopy = rcuptr1;, std::runtime_error);

    // Try to move.
    auto rcuptr2 = std::move(rcuptr1);
    RCULock::synchronize();
    BOOST_CHECK(!isDestroyed);

    // Move to a local and check proper destruction.
    { auto rcuptr3 = std::move(rcuptr2); }
    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);
    // Let's try to swap.
    isDestroyed = false;
    rcuptr1 = RCUPtr<RCURefMoveTestItem>::make([&] { isDestroyed = true; });
    std::swap(rcuptr1, rcuptr2);
    RCULock::synchronize();
    BOOST_CHECK(!isDestroyed);

    // Chain moves to make sure there are no double frees.
    {
        auto rcuptr3 = std::move(rcuptr2);
        auto rcuptr4 = std::move(rcuptr3);
        std::swap(rcuptr1, rcuptr4);
    }

    RCULock::synchronize();
    BOOST_CHECK(!isDestroyed);

    // Check that we can return from a function.
    {
        auto r = ([&] {
            auto moved = std::move(rcuptr1);
            return moved;
        })();

        RCULock::synchronize();
        BOOST_CHECK(!isDestroyed);
    }

    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);

    // Acquire/release workflow.
    isDestroyed = false;
    auto ptr = new RCURefMoveTestItem([&] { isDestroyed = true; });
    auto ptrCopy = ptr;

    BOOST_CHECK_THROW(RCUPtr<RCURefMoveTestItem>::copy(ptr),
                      std::runtime_error);
    rcuptr1 = RCUPtr<RCURefMoveTestItem>::acquire(ptr);
    BOOST_CHECK_EQUAL(rcuptr1, ptrCopy);
    BOOST_CHECK_EQUAL(ptr, NULLPTR(RCURefMoveTestItem));

    ptr = rcuptr1.release();
    BOOST_CHECK_EQUAL(rcuptr1, NULLPTR(RCURefMoveTestItem));
    BOOST_CHECK_EQUAL(ptr, ptrCopy);
    RCULock::synchronize();
    BOOST_CHECK(!isDestroyed);

    RCUPtr<RCURefMoveTestItem>::acquire(ptr);
    BOOST_CHECK_EQUAL(ptr, NULLPTR(RCURefMoveTestItem));
    BOOST_CHECK(!isDestroyed);
    RCULock::synchronize();
    BOOST_CHECK(isDestroyed);
}

BOOST_AUTO_TEST_SUITE_END()