diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp
index de7ca9501..9033836bd 100644
--- a/src/avalanche/processor.cpp
+++ b/src/avalanche/processor.cpp
@@ -1,638 +1,639 @@
 // Copyright (c) 2018-2019 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <avalanche/processor.h>
 
 #include <avalanche/delegationbuilder.h>
 #include <avalanche/peermanager.h>
 #include <avalanche/proof.h>
 #include <avalanche/validation.h>
 #include <chain.h>
 #include <key_io.h> // For DecodeSecret
 #include <net.h>
 #include <netmessagemaker.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <util/translation.h>
 #include <validation.h>
 
 #include <chrono>
 #include <tuple>
 
 /**
  * Run the avalanche event loop every 10ms.
  */
 static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10};
 
 // Unfortunately, the bitcoind codebase is full of globals and we are kinda
 // forced into it here.
 std::unique_ptr<avalanche::Processor> g_avalanche;
 
 namespace avalanche {
 
-static bool IsWorthPolling(const CBlockIndex *pindex) {
+static bool IsWorthPolling(const CBlockIndex *pindex)
+    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
 
     if (pindex->nStatus.isInvalid()) {
         // No point polling invalid blocks.
         return false;
     }
 
     if (::ChainstateActive().IsBlockFinalized(pindex)) {
         // There is no point polling finalized blocks.
         return false;
     }
 
     return true;
 }
 
 static bool VerifyProof(const Proof &proof, bilingual_str &error) {
     ProofValidationState proof_state;
 
     if (!proof.verify(proof_state)) {
         switch (proof_state.GetResult()) {
             case ProofValidationResult::NO_STAKE:
                 error = _("The avalanche proof has no stake.");
                 return false;
             case ProofValidationResult::DUST_THRESOLD:
                 error = _("The avalanche proof stake is too low.");
                 return false;
             case ProofValidationResult::DUPLICATE_STAKE:
                 error = _("The avalanche proof has duplicated stake.");
                 return false;
             case ProofValidationResult::INVALID_STAKE_SIGNATURE:
                 error = _("The avalanche proof has invalid stake signatures.");
                 return false;
             case ProofValidationResult::TOO_MANY_UTXOS:
                 error = strprintf(
                     _("The avalanche proof has too many utxos (max: %u)."),
                     AVALANCHE_MAX_PROOF_STAKES);
                 return false;
             default:
                 error = _("The avalanche proof is invalid.");
                 return false;
         }
     }
 
     return true;
 }
 
 static bool VerifyDelegation(const Delegation &dg,
                              const CPubKey &expectedPubKey,
                              bilingual_str &error) {
     DelegationState dg_state;
 
     CPubKey auth;
     if (!dg.verify(dg_state, auth)) {
         switch (dg_state.GetResult()) {
             case avalanche::DelegationResult::INVALID_SIGNATURE:
                 error = _("The avalanche delegation has invalid signatures.");
                 return false;
             default:
                 error = _("The avalanche delegation is invalid.");
                 return false;
         }
     }
 
     if (auth != expectedPubKey) {
         error = _(
             "The avalanche delegation does not match the expected public key.");
         return false;
     }
 
     return true;
 }
 
 struct Processor::PeerData {
     std::shared_ptr<Proof> proof;
     Delegation delegation;
 };
 
 class Processor::NotificationsHandler
     : public interfaces::Chain::Notifications {
     Processor *m_processor;
 
 public:
     NotificationsHandler(Processor *p) : m_processor(p) {}
 
     void updatedBlockTip() override {
         LOCK(m_processor->cs_peerManager);
 
         if (m_processor->peerData && m_processor->peerData->proof) {
             m_processor->peerManager->registerProof(
                 m_processor->peerData->proof);
         }
 
         m_processor->peerManager->updatedBlockTip();
     }
 };
 
 Processor::Processor(interfaces::Chain &chain, CConnman *connmanIn,
                      std::unique_ptr<PeerData> peerDataIn, CKey sessionKeyIn)
     : connman(connmanIn),
       queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT), round(0),
       peerManager(std::make_unique<PeerManager>()),
       peerData(std::move(peerDataIn)), sessionKey(std::move(sessionKeyIn)) {
     // Make sure we get notified of chain state changes.
     chainNotificationsHandler =
         chain.handleNotifications(std::make_shared<NotificationsHandler>(this));
 }
 
 Processor::~Processor() {
     chainNotificationsHandler.reset();
     stopEventLoop();
 }
 
 std::unique_ptr<Processor>
 Processor::MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain,
                          CConnman *connman, bilingual_str &error) {
     std::unique_ptr<PeerData> peerData;
     CKey masterKey;
     CKey sessionKey;
 
     if (argsman.IsArgSet("-avasessionkey")) {
         sessionKey = DecodeSecret(argsman.GetArg("-avasessionkey", ""));
         if (!sessionKey.IsValid()) {
             error = _("The avalanche session key is invalid.");
             return nullptr;
         }
     } else {
         // Pick a random key for the session.
         sessionKey.MakeNewKey(true);
     }
 
     if (argsman.IsArgSet("-avaproof")) {
         if (!argsman.IsArgSet("-avamasterkey")) {
             error = _(
                 "The avalanche master key is missing for the avalanche proof.");
             return nullptr;
         }
 
         masterKey = DecodeSecret(argsman.GetArg("-avamasterkey", ""));
         if (!masterKey.IsValid()) {
             error = _("The avalanche master key is invalid.");
             return nullptr;
         }
 
         peerData = std::make_unique<PeerData>();
         peerData->proof = std::make_shared<Proof>();
         if (!Proof::FromHex(*peerData->proof, argsman.GetArg("-avaproof", ""),
                             error)) {
             // error is set by FromHex
             return nullptr;
         }
 
         if (!VerifyProof(*peerData->proof, error)) {
             // error is set by VerifyProof
             return nullptr;
         }
 
         std::unique_ptr<DelegationBuilder> dgb;
         const CPubKey &masterPubKey = masterKey.GetPubKey();
 
         if (argsman.IsArgSet("-avadelegation")) {
             Delegation dg;
             if (!Delegation::FromHex(dg, argsman.GetArg("-avadelegation", ""),
                                      error)) {
                 // error is set by FromHex()
                 return nullptr;
             }
 
             if (dg.getProofId() != peerData->proof->getId()) {
                 error = _("The delegation does not match the proof.");
                 return nullptr;
             }
 
             if (masterPubKey != dg.getDelegatedPubkey()) {
                 error = _(
                     "The master key does not match the delegation public key.");
                 return nullptr;
             }
 
             dgb = std::make_unique<DelegationBuilder>(dg);
         } else {
             if (masterPubKey != peerData->proof->getMaster()) {
                 error =
                     _("The master key does not match the proof public key.");
                 return nullptr;
             }
 
             dgb = std::make_unique<DelegationBuilder>(*peerData->proof);
         }
 
         // Generate the delegation to the session key.
         const CPubKey sessionPubKey = sessionKey.GetPubKey();
         if (sessionPubKey != masterPubKey) {
             if (!dgb->addLevel(masterKey, sessionPubKey)) {
                 error = _("Failed to generate a delegation for this session.");
                 return nullptr;
             }
         }
         peerData->delegation = dgb->build();
 
         if (!VerifyDelegation(peerData->delegation, sessionPubKey, error)) {
             // error is set by VerifyDelegation
             return nullptr;
         }
     }
 
     // We can't use std::make_unique with a private constructor
     return std::unique_ptr<Processor>(new Processor(
         chain, connman, std::move(peerData), std::move(sessionKey)));
 }
 
 bool Processor::addBlockToReconcile(const CBlockIndex *pindex) {
     bool isAccepted;
 
     {
         LOCK(cs_main);
         if (!IsWorthPolling(pindex)) {
             // There is no point polling this block.
             return false;
         }
 
         isAccepted = ::ChainActive().Contains(pindex);
     }
 
     return vote_records.getWriteView()
         ->insert(std::make_pair(pindex, VoteRecord(isAccepted)))
         .second;
 }
 
 bool Processor::isAccepted(const CBlockIndex *pindex) const {
     auto r = vote_records.getReadView();
     auto it = r->find(pindex);
     if (it == r.end()) {
         return false;
     }
 
     return it->second.isAccepted();
 }
 
 int Processor::getConfidence(const CBlockIndex *pindex) const {
     auto r = vote_records.getReadView();
     auto it = r->find(pindex);
     if (it == r.end()) {
         return -1;
     }
 
     return it->second.getConfidence();
 }
 
 namespace {
 /**
  * When using TCP, we need to sign all messages as the transport layer is
  * not secure.
  */
 class TCPResponse {
     Response response;
     SchnorrSig sig;
 
 public:
     TCPResponse(Response responseIn, const CKey &key)
         : response(std::move(responseIn)) {
         CHashWriter hasher(SER_GETHASH, 0);
         hasher << response;
         const uint256 hash = hasher.GetHash();
 
         // Now let's sign!
         if (!key.SignSchnorr(hash, sig)) {
             sig.fill(0);
         }
     }
 
     // serialization support
     SERIALIZE_METHODS(TCPResponse, obj) { READWRITE(obj.response, obj.sig); }
 };
 } // namespace
 
 void Processor::sendResponse(CNode *pfrom, Response response) const {
     connman->PushMessage(
         pfrom, CNetMsgMaker(pfrom->GetCommonVersion())
                    .Make(NetMsgType::AVARESPONSE,
                          TCPResponse(std::move(response), sessionKey)));
 }
 
 bool Processor::registerVotes(NodeId nodeid, const Response &response,
                               std::vector<BlockUpdate> &updates, int &banscore,
                               std::string &error) {
     {
         // Save the time at which we can query again.
         LOCK(cs_peerManager);
 
         // FIXME: This will override the time even when we received an old
         // stale message. This should check that the message is indeed the
         // most up to date one before updating the time.
         peerManager->updateNextRequestTime(
             nodeid, std::chrono::steady_clock::now() +
                         std::chrono::milliseconds(response.getCooldown()));
     }
 
     std::vector<CInv> invs;
 
     {
         // Check that the query exists.
         auto w = queries.getWriteView();
         auto it = w->find(std::make_tuple(nodeid, response.getRound()));
         if (it == w.end()) {
             banscore = 2;
             error = "unexpected-ava-response";
             return false;
         }
 
         invs = std::move(it->invs);
         w->erase(it);
     }
 
     // Verify that the request and the vote are consistent.
     const std::vector<Vote> &votes = response.GetVotes();
     size_t size = invs.size();
     if (votes.size() != size) {
         banscore = 100;
         error = "invalid-ava-response-size";
         return false;
     }
 
     for (size_t i = 0; i < size; i++) {
         if (invs[i].hash != votes[i].GetHash()) {
             banscore = 100;
             error = "invalid-ava-response-content";
             return false;
         }
     }
 
     std::map<CBlockIndex *, Vote> responseIndex;
 
     {
         LOCK(cs_main);
         for (const auto &v : votes) {
             auto pindex = LookupBlockIndex(BlockHash(v.GetHash()));
             if (!pindex) {
                 // This should not happen, but just in case...
                 continue;
             }
 
             if (!IsWorthPolling(pindex)) {
                 // There is no point polling this block.
                 continue;
             }
 
             responseIndex.insert(std::make_pair(pindex, v));
         }
     }
 
     {
         // Register votes.
         auto w = vote_records.getWriteView();
         for (const auto &p : responseIndex) {
             CBlockIndex *pindex = p.first;
             const Vote &v = p.second;
 
             auto it = w->find(pindex);
             if (it == w.end()) {
                 // We are not voting on that item anymore.
                 continue;
             }
 
             auto &vr = it->second;
             if (!vr.registerVote(nodeid, v.GetError())) {
                 // This vote did not provide any extra information, move on.
                 continue;
             }
 
             if (!vr.hasFinalized()) {
                 // This item has not been finalized, so we have nothing more
                 // to do.
                 updates.emplace_back(pindex,
                                      vr.isAccepted()
                                          ? BlockUpdate::Status::Accepted
                                          : BlockUpdate::Status::Rejected);
                 continue;
             }
 
             // We just finalized a vote. If it is valid, then let the caller
             // know. Either way, remove the item from the map.
             updates.emplace_back(pindex, vr.isAccepted()
                                              ? BlockUpdate::Status::Finalized
                                              : BlockUpdate::Status::Invalid);
             w->erase(it);
         }
     }
 
     return true;
 }
 
 CPubKey Processor::getSessionPubKey() const {
     return sessionKey.GetPubKey();
 }
 
 uint256 Processor::buildLocalSighash(CNode *pfrom) const {
     CHashWriter hasher(SER_GETHASH, 0);
     hasher << peerData->delegation.getId();
     hasher << pfrom->GetLocalNonce();
     hasher << pfrom->nRemoteHostNonce;
     hasher << pfrom->GetLocalExtraEntropy();
     hasher << pfrom->nRemoteExtraEntropy;
     return hasher.GetHash();
 }
 
 bool Processor::sendHello(CNode *pfrom) const {
     if (!peerData) {
         // We do not have a delegation to advertise.
         return false;
     }
 
     // Now let's sign!
     SchnorrSig sig;
     {
         const uint256 hash = buildLocalSighash(pfrom);
 
         if (!sessionKey.SignSchnorr(hash, sig)) {
             return false;
         }
     }
 
     connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion())
                                     .Make(NetMsgType::AVAHELLO,
                                           Hello(peerData->delegation, sig)));
 
     pfrom->AddKnownProof(peerData->delegation.getProofId());
 
     return true;
 }
 
 std::shared_ptr<Proof> Processor::getLocalProof() const {
     return peerData ? peerData->proof : nullptr;
 }
 
 bool Processor::startEventLoop(CScheduler &scheduler) {
     return eventLoop.startEventLoop(
         scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP);
 }
 
 bool Processor::stopEventLoop() {
     return eventLoop.stopEventLoop();
 }
 
 std::vector<CInv> Processor::getInvsForNextPoll(bool forPoll) {
     std::vector<CInv> invs;
 
     // First remove all blocks that are not worth polling.
     {
         LOCK(cs_main);
         auto w = vote_records.getWriteView();
         for (auto it = w->begin(); it != w->end();) {
             const CBlockIndex *pindex = it->first;
             if (!IsWorthPolling(pindex)) {
                 w->erase(it++);
             } else {
                 ++it;
             }
         }
     }
 
     auto r = vote_records.getReadView();
     for (const std::pair<const CBlockIndex *, VoteRecord> &p :
          reverse_iterate(r)) {
         // Check if we can run poll.
         const bool shouldPoll =
             forPoll ? p.second.registerPoll() : p.second.shouldPoll();
         if (!shouldPoll) {
             continue;
         }
 
         // We don't have a decision, we need more votes.
         invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash());
         if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) {
             // Make sure we do not produce more invs than specified by the
             // protocol.
             return invs;
         }
     }
 
     return invs;
 }
 
 NodeId Processor::getSuitableNodeToQuery() {
     LOCK(cs_peerManager);
     return peerManager->selectNode();
 }
 
 void Processor::clearTimedoutRequests() {
     auto now = std::chrono::steady_clock::now();
     std::map<CInv, uint8_t> timedout_items{};
 
     {
         // Clear expired requests.
         auto w = queries.getWriteView();
         auto it = w->get<query_timeout>().begin();
         while (it != w->get<query_timeout>().end() && it->timeout < now) {
             for (const auto &i : it->invs) {
                 timedout_items[i]++;
             }
 
             w->get<query_timeout>().erase(it++);
         }
     }
 
     if (timedout_items.empty()) {
         return;
     }
 
     // In flight request accounting.
     for (const auto &p : timedout_items) {
         const CInv &inv = p.first;
         assert(inv.type == MSG_BLOCK);
 
         CBlockIndex *pindex;
 
         {
             LOCK(cs_main);
             pindex = LookupBlockIndex(BlockHash(inv.hash));
             if (!pindex) {
                 continue;
             }
         }
 
         auto w = vote_records.getWriteView();
         auto it = w->find(pindex);
         if (it == w.end()) {
             continue;
         }
 
         it->second.clearInflightRequest(p.second);
     }
 }
 
 void Processor::runEventLoop() {
     // Don't do Avalanche while node is IBD'ing
     if (::ChainstateActive().IsInitialBlockDownload()) {
         return;
     }
 
     // First things first, check if we have requests that timed out and clear
     // them.
     clearTimedoutRequests();
 
     // Make sure there is at least one suitable node to query before gathering
     // invs.
     NodeId nodeid = getSuitableNodeToQuery();
     if (nodeid == NO_NODE) {
         return;
     }
 
     std::vector<CInv> invs = getInvsForNextPoll();
     if (invs.empty()) {
         return;
     }
 
     do {
         /**
          * If we lost contact with that node, then we remove it from nodeids,
          * but never add the request to queries, which ensures bad nodes get
          * cleaned up over time.
          */
         bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) {
             uint64_t current_round = round++;
 
             {
                 // Compute the time at which this request times out.
                 auto timeout =
                     std::chrono::steady_clock::now() + queryTimeoutDuration;
                 // Register the query.
                 queries.getWriteView()->insert(
                     {pnode->GetId(), current_round, timeout, invs});
                 // Set the timeout.
                 LOCK(cs_peerManager);
                 peerManager->updateNextRequestTime(pnode->GetId(), timeout);
             }
 
             pnode->m_avalanche_state->invsPolled(invs.size());
 
             // Send the query to the node.
             connman->PushMessage(
                 pnode, CNetMsgMaker(pnode->GetCommonVersion())
                            .Make(NetMsgType::AVAPOLL,
                                  Poll(current_round, std::move(invs))));
             return true;
         });
 
         // Success!
         if (hasSent) {
             return;
         }
 
         {
             // This node is obsolete, delete it.
             LOCK(cs_peerManager);
             peerManager->removeNode(nodeid);
         }
 
         // Get next suitable node to try again
         nodeid = getSuitableNodeToQuery();
     } while (nodeid != NO_NODE);
 }
 
 } // namespace avalanche
diff --git a/src/sync.h b/src/sync.h
index 24a41ca85..62d8a491d 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -1,374 +1,373 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_SYNC_H
 #define BITCOIN_SYNC_H
 
 #include <threadsafety.h>
 #include <util/macros.h>
 
 #include <condition_variable>
 #include <mutex>
 #include <string>
 #include <thread>
 
 /////////////////////////////////////////////////
 //                                             //
 // THE SIMPLE DEFINITION, EXCLUDING DEBUG CODE //
 //                                             //
 /////////////////////////////////////////////////
 
 /*
 RecursiveMutex mutex;
     std::recursive_mutex mutex;
 
 LOCK(mutex);
     std::unique_lock<std::recursive_mutex> criticalblock(mutex);
 
 LOCK2(mutex1, mutex2);
     std::unique_lock<std::recursive_mutex> criticalblock1(mutex1);
     std::unique_lock<std::recursive_mutex> criticalblock2(mutex2);
 
 TRY_LOCK(mutex, name);
     std::unique_lock<std::recursive_mutex> name(mutex, std::try_to_lock_t);
 
 ENTER_CRITICAL_SECTION(mutex); // no RAII
     mutex.lock();
 
 LEAVE_CRITICAL_SECTION(mutex); // no RAII
     mutex.unlock();
 */
 
 ///////////////////////////////
 //                           //
 // THE ACTUAL IMPLEMENTATION //
 //                           //
 ///////////////////////////////
 
 #ifdef DEBUG_LOCKORDER
 void EnterCritical(const char *pszName, const char *pszFile, int nLine,
                    void *cs, bool fTry = false);
 void LeaveCritical();
 void CheckLastCritical(void *cs, std::string &lockname, const char *guardname,
                        const char *file, int line);
 std::string LocksHeld();
 template <typename MutexType>
 void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine,
-                            MutexType *cs) ASSERT_EXCLUSIVE_LOCK(cs);
+                            MutexType *cs);
 void AssertLockNotHeldInternal(const char *pszName, const char *pszFile,
                                int nLine, void *cs);
 void DeleteLock(void *cs);
 bool LockStackEmpty();
 
 /**
  * Call abort() if a potential lock order deadlock bug is detected, instead of
  * just logging information and throwing a logic_error. Defaults to true, and
  * set to false in DEBUG_LOCKORDER unit tests.
  */
 extern bool g_debug_lockorder_abort;
 #else
 inline void EnterCritical(const char *pszName, const char *pszFile, int nLine,
                           void *cs, bool fTry = false) {}
 inline void LeaveCritical() {}
 inline void CheckLastCritical(void *cs, std::string &lockname,
                               const char *guardname, const char *file,
                               int line) {}
 template <typename MutexType>
 inline void AssertLockHeldInternal(const char *pszName, const char *pszFile,
-                                   int nLine, MutexType *cs)
-    ASSERT_EXCLUSIVE_LOCK(cs) {}
+                                   int nLine, MutexType *cs) {}
 inline void AssertLockNotHeldInternal(const char *pszName, const char *pszFile,
                                       int nLine, void *cs) {}
 inline void DeleteLock(void *cs) {}
 inline bool LockStackEmpty() {
     return true;
 }
 #endif
 
 #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
 #define AssertLockNotHeld(cs)                                                  \
     AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
 
 /**
  * Template mixin that adds -Wthread-safety locking annotations and lock order
  * checking to a subset of the mutex API.
  */
 template <typename PARENT> class LOCKABLE AnnotatedMixin : public PARENT {
 public:
     ~AnnotatedMixin() { DeleteLock((void *)this); }
 
     void lock() EXCLUSIVE_LOCK_FUNCTION() { PARENT::lock(); }
 
     void unlock() UNLOCK_FUNCTION() { PARENT::unlock(); }
 
     bool try_lock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
         return PARENT::try_lock();
     }
 
     using UniqueLock = std::unique_lock<PARENT>;
 #ifdef __clang__
     //! For negative capabilities in the Clang Thread Safety Analysis.
     //! A negative requirement uses the EXCLUSIVE_LOCKS_REQUIRED attribute, in
     //! conjunction with the ! operator, to indicate that a mutex should not be
     //! held.
     const AnnotatedMixin &operator!() const { return *this; }
 #endif // __clang__
 };
 
 /**
  * Wrapped mutex: supports recursive locking, but no waiting
  * TODO: We should move away from using the recursive lock by default.
  */
 using RecursiveMutex = AnnotatedMixin<std::recursive_mutex>;
 
 /** Wrapped mutex: supports waiting but not recursive locking */
 typedef AnnotatedMixin<std::mutex> Mutex;
 
 #ifdef DEBUG_LOCKCONTENTION
 void PrintLockContention(const char *pszName, const char *pszFile, int nLine);
 #endif
 
 /** Wrapper around std::unique_lock style lock for Mutex. */
 template <typename Mutex, typename Base = typename Mutex::UniqueLock>
 class SCOPED_LOCKABLE UniqueLock : public Base {
 private:
     void Enter(const char *pszName, const char *pszFile, int nLine) {
         EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex()));
 #ifdef DEBUG_LOCKCONTENTION
         if (!Base::try_lock()) {
             PrintLockContention(pszName, pszFile, nLine);
 #endif
             Base::lock();
 #ifdef DEBUG_LOCKCONTENTION
         }
 #endif
     }
 
     bool TryEnter(const char *pszName, const char *pszFile, int nLine) {
         EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex()), true);
         Base::try_lock();
         if (!Base::owns_lock()) {
             LeaveCritical();
         }
         return Base::owns_lock();
     }
 
 public:
     UniqueLock(Mutex &mutexIn, const char *pszName, const char *pszFile,
                int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn)
         : Base(mutexIn, std::defer_lock) {
         if (fTry) {
             TryEnter(pszName, pszFile, nLine);
         } else {
             Enter(pszName, pszFile, nLine);
         }
     }
 
     UniqueLock(Mutex *pmutexIn, const char *pszName, const char *pszFile,
                int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn) {
         if (!pmutexIn) {
             return;
         }
 
         *static_cast<Base *>(this) = Base(*pmutexIn, std::defer_lock);
         if (fTry) {
             TryEnter(pszName, pszFile, nLine);
         } else {
             Enter(pszName, pszFile, nLine);
         }
     }
 
     ~UniqueLock() UNLOCK_FUNCTION() {
         if (Base::owns_lock()) {
             LeaveCritical();
         }
     }
 
     operator bool() { return Base::owns_lock(); }
 
 protected:
     // needed for reverse_lock
     UniqueLock() {}
 
 public:
     /**
      * An RAII-style reverse lock. Unlocks on construction and locks on
      * destruction.
      */
     class reverse_lock {
     public:
         explicit reverse_lock(UniqueLock &_lock, const char *_guardname,
                               const char *_file, int _line)
             : lock(_lock), file(_file), line(_line) {
             CheckLastCritical((void *)lock.mutex(), lockname, _guardname,
                               _file, _line);
             lock.unlock();
             LeaveCritical();
             lock.swap(templock);
         }
 
         ~reverse_lock() {
             templock.swap(lock);
             EnterCritical(lockname.c_str(), file.c_str(), line,
                           (void *)lock.mutex());
             lock.lock();
         }
 
     private:
         reverse_lock(reverse_lock const &);
         reverse_lock &operator=(reverse_lock const &);
 
         UniqueLock &lock;
         UniqueLock templock;
         std::string lockname;
         const std::string file;
         const int line;
     };
     friend class reverse_lock;
 };
 
 #define REVERSE_LOCK(g)                                                        \
     typename std::decay<decltype(g)>::type::reverse_lock PASTE2(              \
         revlock, __COUNTER__)(g, #g, __FILE__, __LINE__)
 
 template <typename MutexArg>
 using DebugLock = UniqueLock<typename std::remove_reference<
     typename std::remove_pointer<MutexArg>::type>::type>;
 
 #define LOCK(cs)                                                               \
     DebugLock<decltype(cs)> PASTE2(criticalblock,                             \
                                    __COUNTER__)(cs, #cs, __FILE__, __LINE__)
 #define LOCK2(cs1, cs2)                                                        \
     DebugLock<decltype(cs1)> criticalblock1(cs1, #cs1, __FILE__, __LINE__);   \
     DebugLock<decltype(cs2)> criticalblock2(cs2, #cs2, __FILE__, __LINE__);
 #define TRY_LOCK(cs, name)                                                     \
     DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__, true)
 #define WAIT_LOCK(cs, name)                                                    \
     DebugLock<decltype(cs)> name(cs, #cs, __FILE__, __LINE__)
 
 #define ENTER_CRITICAL_SECTION(cs)                                             \
     {                                                                          \
         EnterCritical(#cs, __FILE__, __LINE__, (void *)(&cs));                 \
         (cs).lock();                                                           \
     }
 
 #define LEAVE_CRITICAL_SECTION(cs)                                             \
     {                                                                          \
         (cs).unlock();                                                         \
         LeaveCritical();                                                       \
     }
 
 //! Run code while locking a mutex.
 //!
 //! Examples:
 //!
 //!   WITH_LOCK(cs, shared_val = shared_val + 1);
 //!
 //!   int val = WITH_LOCK(cs, return shared_val);
 //!
 #define WITH_LOCK(cs, code)                                                    \
     [&] {                                                                      \
         LOCK(cs);                                                              \
         code;                                                                  \
     }()
 
 class CSemaphore {
 private:
     std::condition_variable condition;
     std::mutex mutex;
     int value;
 
 public:
     explicit CSemaphore(int init) : value(init) {}
 
     void wait() {
         std::unique_lock<std::mutex> lock(mutex);
         condition.wait(lock, [&]() { return value >= 1; });
         value--;
     }
 
     bool try_wait() {
         std::lock_guard<std::mutex> lock(mutex);
         if (value < 1) {
             return false;
         }
         value--;
         return true;
     }
 
     void post() {
         {
             std::lock_guard<std::mutex> lock(mutex);
             value++;
         }
         condition.notify_one();
     }
 };
 
 /** RAII-style semaphore lock */
 class CSemaphoreGrant {
 private:
     CSemaphore *sem;
     bool fHaveGrant;
 
 public:
     void Acquire() {
         if (fHaveGrant) {
             return;
         }
         sem->wait();
         fHaveGrant = true;
     }
 
     void Release() {
         if (!fHaveGrant) {
             return;
         }
         sem->post();
         fHaveGrant = false;
     }
 
     bool TryAcquire() {
         if (!fHaveGrant && sem->try_wait()) {
             fHaveGrant = true;
         }
         return fHaveGrant;
     }
 
     void MoveTo(CSemaphoreGrant &grant) {
         grant.Release();
         grant.sem = sem;
         grant.fHaveGrant = fHaveGrant;
         fHaveGrant = false;
     }
 
     CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {}
 
     explicit CSemaphoreGrant(CSemaphore &sema, bool fTry = false)
         : sem(&sema), fHaveGrant(false) {
         if (fTry) {
             TryAcquire();
         } else {
             Acquire();
         }
     }
 
     ~CSemaphoreGrant() { Release(); }
 
     operator bool() const { return fHaveGrant; }
 };
 
 // Utility class for indicating to compiler thread analysis that a mutex is
 // locked (when it couldn't be determined otherwise).
 struct SCOPED_LOCKABLE LockAssertion {
     template <typename Mutex>
     explicit LockAssertion(Mutex &mutex) EXCLUSIVE_LOCK_FUNCTION(mutex) {
 #ifdef DEBUG_LOCKORDER
         AssertLockHeld(mutex);
 #endif
     }
 
     ~LockAssertion() UNLOCK_FUNCTION() {}
 };
 
 #endif // BITCOIN_SYNC_H
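
Reviewer note on the annotation pattern this patch applies: EXCLUSIVE_LOCKS_REQUIRED is a compile-time contract enforced by Clang's -Wthread-safety analysis, while AssertLockHeld is the matching runtime check. By dropping ASSERT_EXCLUSIVE_LOCK from AssertLockHeldInternal, the analysis no longer treats a runtime assert as proof that the lock is held; the lock requirement instead has to appear on the function declaration itself, which is exactly what the processor.cpp hunk adds to IsWorthPolling. The following is a minimal, self-contained sketch of that pattern using raw Clang attributes instead of the threadsafety.h wrappers; the AnnotatedMutex/Bump/g_counter names are invented for the example and do not exist in the codebase.

// Build with: clang++ -std=c++17 -Wthread-safety -c sketch.cpp
#include <mutex>

// Simplified stand-ins for the macros defined in threadsafety.h.
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))

// An annotated mutex, playing the role AnnotatedMixin plays above.
class CAPABILITY("mutex") AnnotatedMutex {
public:
    void lock() ACQUIRE() { m.lock(); }
    void unlock() RELEASE() { m.unlock(); }

private:
    std::mutex m;
};

AnnotatedMutex g_mutex;                // stand-in for cs_main
int g_counter GUARDED_BY(g_mutex) = 0; // state protected by g_mutex

// Like the patched IsWorthPolling(): the declaration states the lock
// requirement, so the analysis checks every caller at compile time
// instead of trusting a runtime assert inside the body.
void Bump() REQUIRES(g_mutex) {
    ++g_counter;
}

void Good() {
    g_mutex.lock();
    Bump(); // OK: the analysis can see g_mutex is held here.
    g_mutex.unlock();
}

void Bad() {
    // Bump(); // warning: calling function 'Bump' requires holding mutex
    //         // 'g_mutex' exclusively [-Wthread-safety-analysis]
}

Nothing here changes runtime behavior: the check performed by AssertLockHeld (compiled in under DEBUG_LOCKORDER) is untouched. The patch only moves the static guarantee from the assert's attribute onto explicit declaration-site annotations.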