diff --git a/src/random.cpp b/src/random.cpp index e7dd6b477..baa3fddb2 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -1,485 +1,483 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "random.h" +#include -#include "crypto/sha512.h" -#include "support/cleanse.h" #ifdef WIN32 -#include "compat.h" // for Windows API +#include // for Windows API #include #endif -#include "logging.h" // for LogPrint() -#include "sync.h" // for WAIT_LOCK -#include "utiltime.h" // for GetTime() +#include +#include // for LogPrint() +#include +#include // for WAIT_LOCK +#include // for GetTime() + +#include +#include #include #include #include +#include #include - #ifndef WIN32 #include #include #endif #ifdef HAVE_SYS_GETRANDOM #include #include #endif #if defined(HAVE_GETENTROPY) || \ (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)) #include #endif #if defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) #include #endif #ifdef HAVE_SYSCTL_ARND #include #include // for ARRAYLEN #endif -#include - #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) #include #endif -#include -#include - [[noreturn]] static void RandFailure() { LogPrintf("Failed to read randomness, aborting\n"); std::abort(); } static inline int64_t GetPerformanceCounter() { // Read the hardware time stamp counter when available. // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information. #if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) return __rdtsc(); #elif !defined(_MSC_VER) && defined(__i386__) uint64_t r = 0; // Constrain the r variable to the eax:edx pair. __asm__ volatile("rdtsc" : "=A"(r)); return r; #elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__)) uint64_t r1 = 0, r2 = 0; // Constrain r1 to rax and r2 to rdx. __asm__ volatile("rdtsc" : "=a"(r1), "=d"(r2)); return (r2 << 32) | r1; #else // Fall back to using C++11 clock (usually microsecond or nanosecond // precision) return std::chrono::high_resolution_clock::now().time_since_epoch().count(); #endif } #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) static std::atomic hwrand_initialized{false}; static bool rdrand_supported = false; static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000; static void RDRandInit() { uint32_t eax, ebx, ecx, edx; if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & CPUID_F1_ECX_RDRAND)) { LogPrintf("Using RdRand as an additional entropy source\n"); rdrand_supported = true; } hwrand_initialized.store(true); } #else static void RDRandInit() {} #endif static bool GetHWRand(uint8_t *ent32) { #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) assert(hwrand_initialized.load(std::memory_order_relaxed)); if (rdrand_supported) { uint8_t ok; // Not all assemblers support the rdrand instruction, write it in hex. 
#ifdef __i386__ for (int iter = 0; iter < 4; ++iter) { uint32_t r1, r2; __asm__ volatile(".byte 0x0f, 0xc7, 0xf0;" // rdrand %eax ".byte 0x0f, 0xc7, 0xf2;" // rdrand %edx "setc %2" : "=a"(r1), "=d"(r2), "=q"(ok)::"cc"); if (!ok) { return false; } WriteLE32(ent32 + 8 * iter, r1); WriteLE32(ent32 + 8 * iter + 4, r2); } #else uint64_t r1, r2, r3, r4; __asm__ volatile(".byte 0x48, 0x0f, 0xc7, 0xf0, " // rdrand %rax "0x48, 0x0f, 0xc7, 0xf3, " // rdrand %rbx "0x48, 0x0f, 0xc7, 0xf1, " // rdrand %rcx "0x48, 0x0f, 0xc7, 0xf2; " // rdrand %rdx "setc %4" : "=a"(r1), "=b"(r2), "=c"(r3), "=d"(r4), "=q"(ok)::"cc"); if (!ok) { return false; } WriteLE64(ent32, r1); WriteLE64(ent32 + 8, r2); WriteLE64(ent32 + 16, r3); WriteLE64(ent32 + 24, r4); #endif return true; } #endif return false; } void RandAddSeed() { // Seed with CPU performance counter int64_t nCounter = GetPerformanceCounter(); RAND_add(&nCounter, sizeof(nCounter), 1.5); memory_cleanse((void *)&nCounter, sizeof(nCounter)); } static void RandAddSeedPerfmon() { RandAddSeed(); #ifdef WIN32 // Don't need this on Linux, OpenSSL automatically uses /dev/urandom // Seed with the entire set of perfmon data // This can take up to 2 seconds, so only do it every 10 minutes static int64_t nLastPerfmon; if (GetTime() < nLastPerfmon + 10 * 60) { return; } nLastPerfmon = GetTime(); std::vector vData(250000, 0); long ret = 0; unsigned long nSize = 0; // Bail out at more than 10MB of performance data const size_t nMaxSize = 10000000; while (true) { nSize = vData.size(); ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", nullptr, nullptr, vData.data(), &nSize); if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize) { break; } // Grow size of buffer exponentially vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); } RegCloseKey(HKEY_PERFORMANCE_DATA); if (ret == ERROR_SUCCESS) { RAND_add(vData.data(), nSize, nSize / 100.0); memory_cleanse(vData.data(), nSize); LogPrint(BCLog::RAND, "%s: %lu bytes\n", __func__, nSize); } else { // Warn only once static bool warned = false; if (!warned) { LogPrintf("%s: Warning: RegQueryValueExA(HKEY_PERFORMANCE_DATA) " "failed with code %i\n", __func__, ret); warned = true; } } #endif } #ifndef WIN32 /** * Fallback: get 32 bytes of system entropy from /dev/urandom. The most * compatible way to get cryptographic randomness on UNIX-ish platforms. */ static void GetDevURandom(uint8_t *ent32) { int f = open("/dev/urandom", O_RDONLY); if (f == -1) { RandFailure(); } int have = 0; do { ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have); if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) { close(f); RandFailure(); } have += n; } while (have < NUM_OS_RANDOM_BYTES); close(f); } #endif /** Get 32 bytes of system entropy. */ void GetOSRand(uint8_t *ent32) { #if defined(WIN32) HCRYPTPROV hProvider; int ret = CryptAcquireContextW(&hProvider, nullptr, nullptr, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); if (!ret) { RandFailure(); } ret = CryptGenRandom(hProvider, NUM_OS_RANDOM_BYTES, ent32); if (!ret) { RandFailure(); } CryptReleaseContext(hProvider, 0); #elif defined(HAVE_SYS_GETRANDOM) /** * Linux. From the getrandom(2) man page: * "If the urandom source has been initialized, reads of up to 256 bytes * will always return as many bytes as requested and will not be interrupted * by signals." 
*/ int rv = syscall(SYS_getrandom, ent32, NUM_OS_RANDOM_BYTES, 0); if (rv != NUM_OS_RANDOM_BYTES) { if (rv < 0 && errno == ENOSYS) { /* Fallback for kernel <3.17: the return value will be -1 and errno * ENOSYS if the syscall is not available, in that case fall back * to /dev/urandom. */ GetDevURandom(ent32); } else { RandFailure(); } } #elif defined(HAVE_GETENTROPY) && defined(__OpenBSD__) /** * On OpenBSD this can return up to 256 bytes of entropy, will return an * error if more are requested. * The call cannot return less than the requested number of bytes. * getentropy is explicitly limited to openbsd here, as a similar (but not * the same) function may exist on other platforms via glibc. */ if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } #elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) // We need a fallback for OSX < 10.12 if (&getentropy != nullptr) { if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } } else { GetDevURandom(ent32); } #elif defined(HAVE_SYSCTL_ARND) /** * FreeBSD and similar. It is possible for the call to return less bytes * than requested, so need to read in a loop. */ static const int name[2] = {CTL_KERN, KERN_ARND}; int have = 0; do { size_t len = NUM_OS_RANDOM_BYTES - have; if (sysctl(name, ARRAYLEN(name), ent32 + have, &len, nullptr, 0) != 0) { RandFailure(); } have += len; } while (have < NUM_OS_RANDOM_BYTES); #else /** * Fall back to /dev/urandom if there is no specific method implemented to * get system entropy for this OS. */ GetDevURandom(ent32); #endif } void GetRandBytes(uint8_t *buf, int num) { if (RAND_bytes(buf, num) != 1) { RandFailure(); } } static void AddDataToRng(void *data, size_t len); void RandAddSeedSleep() { int64_t nPerfCounter1 = GetPerformanceCounter(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); int64_t nPerfCounter2 = GetPerformanceCounter(); // Combine with and update state AddDataToRng(&nPerfCounter1, sizeof(nPerfCounter1)); AddDataToRng(&nPerfCounter2, sizeof(nPerfCounter2)); memory_cleanse(&nPerfCounter1, sizeof(nPerfCounter1)); memory_cleanse(&nPerfCounter2, sizeof(nPerfCounter2)); } static CWaitableCriticalSection cs_rng_state; static uint8_t rng_state[32] = {0}; static uint64_t rng_counter = 0; static void AddDataToRng(void *data, size_t len) { CSHA512 hasher; hasher.Write((const uint8_t *)&len, sizeof(len)); hasher.Write((const uint8_t *)data, len); uint8_t buf[64]; { WAIT_LOCK(cs_rng_state, lock); hasher.Write(rng_state, sizeof(rng_state)); hasher.Write((const uint8_t *)&rng_counter, sizeof(rng_counter)); ++rng_counter; hasher.Finalize(buf); memcpy(rng_state, buf + 32, 32); } memory_cleanse(buf, 64); } void GetStrongRandBytes(uint8_t *out, int num) { assert(num <= 32); CSHA512 hasher; uint8_t buf[64]; // First source: OpenSSL's RNG RandAddSeedPerfmon(); GetRandBytes(buf, 32); hasher.Write(buf, 32); // Second source: OS RNG GetOSRand(buf); hasher.Write(buf, 32); // Third source: HW RNG, if available. 
if (GetHWRand(buf)) { hasher.Write(buf, 32); } // Combine with and update state { WAIT_LOCK(cs_rng_state, lock); hasher.Write(rng_state, sizeof(rng_state)); hasher.Write((const uint8_t *)&rng_counter, sizeof(rng_counter)); ++rng_counter; hasher.Finalize(buf); memcpy(rng_state, buf + 32, 32); } // Produce output memcpy(out, buf, num); memory_cleanse(buf, 64); } uint64_t GetRand(uint64_t nMax) { if (nMax == 0) { return 0; } // The range of the random source must be a multiple of the modulus to give // every possible output value an equal possibility uint64_t nRange = (std::numeric_limits::max() / nMax) * nMax; uint64_t nRand = 0; do { GetRandBytes((uint8_t *)&nRand, sizeof(nRand)); } while (nRand >= nRange); return (nRand % nMax); } int GetRandInt(int nMax) { return GetRand(nMax); } uint256 GetRandHash() { uint256 hash; GetRandBytes((uint8_t *)&hash, sizeof(hash)); return hash; } void FastRandomContext::RandomSeed() { uint256 seed = GetRandHash(); rng.SetKey(seed.begin(), 32); requires_seed = false; } uint256 FastRandomContext::rand256() { if (bytebuf_size < 32) { FillByteBuffer(); } uint256 ret; memcpy(ret.begin(), bytebuf + 64 - bytebuf_size, 32); bytebuf_size -= 32; return ret; } std::vector FastRandomContext::randbytes(size_t len) { std::vector ret(len); if (len > 0) { rng.Output(&ret[0], len); } return ret; } FastRandomContext::FastRandomContext(const uint256 &seed) : requires_seed(false), bytebuf_size(0), bitbuf_size(0) { rng.SetKey(seed.begin(), 32); } bool Random_SanityCheck() { uint64_t start = GetPerformanceCounter(); /** * This does not measure the quality of randomness, but it does test that * OSRandom() overwrites all 32 bytes of the output given a maximum number * of tries. */ static const ssize_t MAX_TRIES = 1024; uint8_t data[NUM_OS_RANDOM_BYTES]; /* Tracks which bytes have been overwritten at least once */ bool overwritten[NUM_OS_RANDOM_BYTES] = {}; int num_overwritten; int tries = 0; /** * Loop until all bytes have been overwritten at least once, or max number * tries reached. */ do { memset(data, 0, NUM_OS_RANDOM_BYTES); GetOSRand(data); for (int x = 0; x < NUM_OS_RANDOM_BYTES; ++x) { overwritten[x] |= (data[x] != 0); } num_overwritten = 0; for (int x = 0; x < NUM_OS_RANDOM_BYTES; ++x) { if (overwritten[x]) { num_overwritten += 1; } } tries += 1; } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES); /* If this failed, bailed out after too many tries */ if (num_overwritten != NUM_OS_RANDOM_BYTES) { return false; } // Check that GetPerformanceCounter increases at least during a GetOSRand() // call + 1ms sleep. std::this_thread::sleep_for(std::chrono::milliseconds(1)); uint64_t stop = GetPerformanceCounter(); if (stop == start) { return false; } // We called GetPerformanceCounter. Use it as entropy. RAND_add((const uint8_t *)&start, sizeof(start), 1); RAND_add((const uint8_t *)&stop, sizeof(stop), 1); return true; } FastRandomContext::FastRandomContext(bool fDeterministic) : requires_seed(!fDeterministic), bytebuf_size(0), bitbuf_size(0) { if (!fDeterministic) { return; } uint256 seed; rng.SetKey(seed.begin(), 32); } void RandomInit() { RDRandInit(); } diff --git a/src/random.h b/src/random.h index f60a233ee..3a06fd839 100644 --- a/src/random.h +++ b/src/random.h @@ -1,153 +1,153 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
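GetRand() above reduces a uniform 64-bit draw into [0, nMax) by rejection sampling rather than a bare modulo, so no residue is over-represented. A minimal standalone sketch of the same idea, assuming only a callable 64-bit source; UniformBelow and Rand64Fn are illustrative names, not part of the patch:

#include <cstdint>
#include <limits>

// Accept only draws below the largest multiple of nMax that fits in a
// uint64_t, so every value in [0, nMax) is equally likely. Rand64Fn stands
// in for any uniform 64-bit source such as one built on GetRandBytes().
template <typename Rand64Fn>
uint64_t UniformBelow(uint64_t nMax, Rand64Fn rand64) {
    if (nMax == 0) {
        return 0;
    }
    const uint64_t nRange =
        (std::numeric_limits<uint64_t>::max() / nMax) * nMax;
    uint64_t nRand;
    do {
        nRand = rand64();
    } while (nRand >= nRange);
    return nRand % nMax;
}

Since nRange always covers at least half of the 64-bit space, the loop needs at most about two draws on average.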
#ifndef BITCOIN_RANDOM_H #define BITCOIN_RANDOM_H -#include "crypto/chacha20.h" -#include "crypto/common.h" -#include "uint256.h" +#include +#include +#include #include /** * Seed OpenSSL PRNG with additional entropy data. */ void RandAddSeed(); /** * Functions to gather random data via the OpenSSL PRNG */ void GetRandBytes(uint8_t *buf, int num); uint64_t GetRand(uint64_t nMax); int GetRandInt(int nMax); uint256 GetRandHash(); /** * Add a little bit of randomness to the output of GetStrongRangBytes. * This sleeps for a millisecond, so should only be called when there is no * other work to be done. */ void RandAddSeedSleep(); /** * Function to gather random data from multiple sources, failing whenever any of * those source fail to provide a result. */ void GetStrongRandBytes(uint8_t *buf, int num); /** * Fast randomness source. This is seeded once with secure random data, but is * completely deterministic and insecure after that. * This class is not thread-safe. */ class FastRandomContext { private: bool requires_seed; ChaCha20 rng; uint8_t bytebuf[64]; int bytebuf_size; uint64_t bitbuf; int bitbuf_size; void RandomSeed(); void FillByteBuffer() { if (requires_seed) { RandomSeed(); } rng.Output(bytebuf, sizeof(bytebuf)); bytebuf_size = sizeof(bytebuf); } void FillBitBuffer() { bitbuf = rand64(); bitbuf_size = 64; } public: explicit FastRandomContext(bool fDeterministic = false); /** Initialize with explicit seed (only for testing) */ explicit FastRandomContext(const uint256 &seed); /** Generate a random 64-bit integer. */ uint64_t rand64() { if (bytebuf_size < 8) { FillByteBuffer(); } uint64_t ret = ReadLE64(bytebuf + 64 - bytebuf_size); bytebuf_size -= 8; return ret; } /** Generate a random (bits)-bit integer. */ uint64_t randbits(int bits) { if (bits == 0) { return 0; } else if (bits > 32) { return rand64() >> (64 - bits); } else { if (bitbuf_size < bits) { FillBitBuffer(); } uint64_t ret = bitbuf & (~uint64_t(0) >> (64 - bits)); bitbuf >>= bits; bitbuf_size -= bits; return ret; } } /** Generate a random integer in the range [0..range). */ uint64_t randrange(uint64_t range) { --range; int bits = CountBits(range); while (true) { uint64_t ret = randbits(bits); if (ret <= range) { return ret; } } } /** Generate random bytes. */ std::vector randbytes(size_t len); /** Generate a random 32-bit integer. */ uint32_t rand32() { return randbits(32); } /** generate a random uint256. */ uint256 rand256(); /** Generate a random boolean. */ bool randbool() { return randbits(1); } }; /** * Number of random bytes returned by GetOSRand. * When changing this constant make sure to change all call sites, and make sure * that the underlying OS APIs for all platforms support the number (many cap * out at 256 bytes). */ static const ssize_t NUM_OS_RANDOM_BYTES = 32; /** * Get 32 bytes of system entropy. Do not use this in application code: use * GetStrongRandBytes instead. */ void GetOSRand(uint8_t *ent32); /** * Check that OS randomness is available and returning the requested number of * bytes. */ bool Random_SanityCheck(); /** Initialize the RNG. */ void RandomInit(); #endif // BITCOIN_RANDOM_H diff --git a/src/rcu.cpp b/src/rcu.cpp index c4aaffe61..d708355f8 100644 --- a/src/rcu.cpp +++ b/src/rcu.cpp @@ -1,232 +1,233 @@ // Copyright (c) 2018 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
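As a usage note for the FastRandomContext API declared above, here is a hypothetical sketch (not part of the patch): the default-constructed context seeds itself once with secure random data on first use, while the uint256-seeded constructor produces a deterministic stream and is meant for tests.

#include <random.h>
#include <uint256.h>

static void FastRandomContextExample() {
    FastRandomContext rng;                 // lazily seeded via GetRandHash()
    uint64_t roll = rng.randrange(6) + 1;  // uniform in [1, 6]
    bool coin = rng.randbool();            // single random bit
    (void)roll;
    (void)coin;

    FastRandomContext det(uint256{});      // fixed seed: reproducible stream
    uint256 first = det.rand256();
    (void)first;
}

The class is not thread-safe, so each thread should keep its own context rather than sharing one.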
-#include "rcu.h" -#include "sync.h" +#include + +#include #include #include #include std::atomic RCUInfos::revision{0}; thread_local RCUInfos RCUInfos::infos{}; /** * How many time a busy loop runs before yelding. */ static const int RCU_ACTIVE_LOOP_COUNT = 10; /** * We maintain a linked list of all the RCUInfos for each active thread. Upon * start, a new thread adds itself to the head of the liked list and the node is * then removed when the threads shuts down. * * Insertion is fairly straightforward. The first step is to set the next * pointer of the node being inserted as the first node in the list as follow: * * threadInfos -> Node -> ... * ^ * Nadded -| * * The second step is to update threadInfos to point on the inserted node. This * is done using compare and swap. If the head of the list changed during this * process - for instance due to another insertion, CAS will fail and we can * start again. * * threadInfos Node -> ... * | ^ * \-> Nadded -| * * Deletion is a slightly more complex process. The general idea is to go over * the list, find the parent of the item we want to remove and set it's next * pointer to jump over it. * * Nparent -> Ndelete -> Nchild * * Nparent Ndelete -> Nchild * | ^ * \--------------------| * * We run into problems when a nodes is deleted concurrently with another node * being inserted. Hopefully, we can solve that problem with CAS as well. * * threadInfos -> Ndelete -> Nchild * ^ * Nadded -| * * The insertion will try to update threadInfos to point to Nadded, while the * deletion will try to update it to point to Nchild. Whichever goes first will * cause the other to fail its CAS and restart its process. * * threadInfos Ndelete -> Nchild * | ^ * \--> Nadded -| * * After a successful insertion, threadInfos now points to Nadded, and the CAS * to move it to Nchild will fail, causing the deletion process to restart from * scratch. * * /----------------------| * | V * threadInfos Ndelete -> Nchild * ^ * Nadded -| * * After a succesful deletion, threadInfos now points to NChild and the CAS to * move it to Nadded will fail, causing the insertion process to fail. * * We also run into problems when several nodes are deleted concurrently. * Because it is not possible to read Ndelete->next and update Nparent->next * atomically, we may end up setting Nparent->next to a stale value if Nchild is * deleted. * * /----------------------| * | V * Nparent Ndelete Nchild -> Ngrandchild * | ^ * \--------------------| * * This would cause Nchild to be 'resurrected', which is obviously a problem. In * order to avoid this problem, we make sure that no concurrent deletion takes * places using a good old mutex. Using a mutex for deletion also ensures we are * safe from the ABA problem. * * Once a node is deleted from the list, we cannot destroy it right away. * Readers do not hold the mutex and may still be using that node. We need to * leverage RCU to make sure all the readers have finished their work before * allowing the node to be destroyed. We need to keep the mutex during that * process, because we just removed our thread from the list of thread to wait * for. A concurrent deletion would not wait for us and may end up deleting data * we rely on as a result. */ static std::atomic threadInfos{nullptr}; static CCriticalSection csThreadInfosDelete; RCUInfos::RCUInfos() : state(0), next(nullptr) { RCUInfos *head = threadInfos.load(); do { next.store(head); } while (!threadInfos.compare_exchange_weak(head, this)); // Release the lock. 
readFree(); } RCUInfos::~RCUInfos() { /** * Before the thread is removed from the list, make sure we clean up * everything. */ runCleanups(); while (cleanups.size() > 0) { synchronize(); } while (true) { LOCK(csThreadInfosDelete); std::atomic<RCUInfos *> *ptr; { RCULock lock(this); ptr = &threadInfos; while (true) { RCUInfos *current = ptr->load(); if (current == this) { break; } assert(current != nullptr); ptr = &current->next; } } /** * We have our node and the parent is ready to be updated. * NB: The CAS operation only checks for *ptr and not for next. This * would be a big problem in the general case, but because we only * insert at the tip of the list and cannot have concurrent deletion * thanks to the use of a mutex, we are safe. */ RCUInfos *current = this; if (!ptr->compare_exchange_strong(current, next.load())) { continue; } /** * We now wait for possible readers to go past the synchronization * point. We need to do so while holding the lock as this operation * requires us to be a reader, but we just removed ourselves from the * list of readers to check and may therefore not be waited for. */ synchronize(); break; } } void RCUInfos::synchronize() { uint64_t syncRev = ++revision; // Loop a few times lock free. for (int i = 0; i < RCU_ACTIVE_LOOP_COUNT; i++) { runCleanups(); if (cleanups.empty() && hasSyncedTo(syncRev)) { return; } } // It seems like we have some contention. Let's try to not starve the // system. Let's make sure threads that land here proceed one by one. // XXX: The best option long term is most likely to use a futex on one of // the threads causing synchronization delay so this thread can be woken up // at an appropriate time. static std::condition_variable cond; static CWaitableCriticalSection cs; WAIT_LOCK(cs, lock); do { runCleanups(); cond.notify_one(); } while (!cond.wait_for(lock, std::chrono::microseconds(1), [&] { return cleanups.empty() && hasSyncedTo(syncRev); })); } void RCUInfos::runCleanups() { // By the time we run a set of cleanups, we may have more cleanups // available so we loop until there is nothing available for cleanup. while (true) { if (cleanups.empty()) { // There is nothing to cleanup. return; } auto it = cleanups.begin(); uint64_t syncedTo = hasSyncedTo(it->first); while (it != cleanups.end() && it->first <= syncedTo) { // Run the cleanup and remove it from the map. it->second(); cleanups.erase(it++); } } } uint64_t RCUInfos::hasSyncedTo(uint64_t cutoff) { uint64_t syncedTo = revision.load(); // Go over the list and check all threads are past the synchronization // point. RCULock lock(this); RCUInfos *current = threadInfos.load(); while (current != nullptr) { syncedTo = std::min(syncedTo, current->state.load()); if (syncedTo < cutoff) { return 0; } current = current->next.load(); } return syncedTo; } diff --git a/src/rest.cpp b/src/rest.cpp index ddcf969b9..01fcf67ff 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -1,707 +1,707 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php.
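The lock-free head insertion that the rcu.cpp comment walks through reduces to a CAS loop, the same shape as the RCUInfos constructor. A minimal self-contained sketch under illustrative names (Node, listHead, PushFront are not the patch's types):

#include <atomic>

struct Node {
    std::atomic<Node *> next{nullptr};
};

static std::atomic<Node *> listHead{nullptr};

// Point the new node at the current head, then atomically swing the head to
// the new node. If another thread changed the head in the meantime,
// compare_exchange_weak fails, reloads `head`, and the loop retries.
void PushFront(Node *node) {
    Node *head = listHead.load();
    do {
        node->next.store(head);
    } while (!listHead.compare_exchange_weak(head, node));
}

As the comment explains, deletions are serialized behind a mutex, so this head CAS is the only concurrent list writer a removal has to race against.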
-#include "chain.h" -#include "chainparams.h" -#include "config.h" -#include "core_io.h" -#include "httpserver.h" -#include "primitives/block.h" -#include "primitives/transaction.h" -#include "rpc/blockchain.h" -#include "rpc/server.h" -#include "rpc/tojson.h" -#include "streams.h" -#include "sync.h" -#include "txmempool.h" -#include "utilstrencodings.h" -#include "validation.h" -#include "version.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include // Allow a max of 15 outpoints to be queried at once. static const size_t MAX_GETUTXOS_OUTPOINTS = 15; enum class RetFormat { UNDEF, BINARY, HEX, JSON, }; static const struct { enum RetFormat rf; const char *name; } rf_names[] = { {RetFormat::UNDEF, ""}, {RetFormat::BINARY, "bin"}, {RetFormat::HEX, "hex"}, {RetFormat::JSON, "json"}, }; struct CCoin { uint32_t nHeight; CTxOut out; CCoin() : nHeight(0) {} explicit CCoin(Coin in) : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {} ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { uint32_t nTxVerDummy = 0; READWRITE(nTxVerDummy); READWRITE(nHeight); READWRITE(out); } }; extern UniValue mempoolInfoToJSON(); extern UniValue mempoolToJSON(bool fVerbose = false); static bool RESTERR(HTTPRequest *req, enum HTTPStatusCode status, std::string message) { req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(status, message + "\r\n"); return false; } static enum RetFormat ParseDataFormat(std::string ¶m, const std::string &strReq) { const std::string::size_type pos = strReq.rfind('.'); if (pos == std::string::npos) { param = strReq; return rf_names[0].rf; } param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (suff == rf_names[i].name) { return rf_names[i].rf; } } /* If no suffix is found, return original string. */ param = strReq; return rf_names[0].rf; } static std::string AvailableDataFormatsString() { std::string formats = ""; for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (strlen(rf_names[i].name) > 0) { formats.append("."); formats.append(rf_names[i].name); formats.append(", "); } } if (formats.length() > 0) { return formats.substr(0, formats.length() - 2); } return formats; } static bool ParseHashStr(const std::string &strReq, uint256 &v) { if (!IsHex(strReq) || (strReq.size() != 64)) { return false; } v.SetHex(strReq); return true; } static bool CheckWarmup(HTTPRequest *req) { std::string statusmessage; if (RPCIsInWarmup(&statusmessage)) { return RESTERR(req, HTTP_SERVICE_UNAVAILABLE, "Service temporarily unavailable: " + statusmessage); } return true; } static bool rest_headers(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector path; boost::split(path, param, boost::is_any_of("/")); if (path.size() != 2) { return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. 
Use " "/rest/headers//.."); } long count = strtol(path[0].c_str(), nullptr, 10); if (count < 1 || count > 2000) { return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]); } std::string hashStr = path[1]; uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } std::vector headers; headers.reserve(count); { LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); while (pindex != nullptr && chainActive.Contains(pindex)) { headers.push_back(pindex); if (headers.size() == size_t(count)) { break; } pindex = chainActive.Next(pindex); } } CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { ssHeader << pindex->GetBlockHeader(); } switch (rf) { case RetFormat::BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); { LOCK(cs_main); for (const CBlockIndex *pindex : headers) { jsonHeaders.push_back(blockheaderToJSON(pindex)); } } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: .bin, .hex)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_block(const Config &config, HTTPRequest *req, const std::string &strURIPart, bool showTxDetails) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } CBlock block; CBlockIndex *pblockindex = nullptr; { LOCK(cs_main); pblockindex = LookupBlockIndex(hash); if (!pblockindex) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } } CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; switch (rf) { case RetFormat::BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objBlock; { LOCK(cs_main); objBlock = blockToJSON(config, block, pblockindex, showTxDetails); } std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_block_extended(Config &config, HTTPRequest *req, const 
std::string &strURIPart) { return rest_block(config, req, strURIPart, true); } static bool rest_block_notxdetails(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, false); } static bool rest_chaininfo(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); UniValue chainInfoObject = getblockchaininfo(config, jsonRequest); std::string strJSON = chainInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_mempool_info(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { UniValue mempoolInfoObject = mempoolInfoToJSON(); std::string strJSON = mempoolInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_mempool_contents(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { UniValue mempoolObject = mempoolToJSON(true); std::string strJSON = mempoolObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_tx(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const TxId txid(hash); CTransactionRef tx; uint256 hashBlock = uint256(); if (!GetTransaction(config, txid, tx, hashBlock, true)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssTx << tx; switch (rf) { case RetFormat::BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + 
AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static bool rest_getutxos(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector uriParts; if (param.length() > 1) { std::string strUriParams = param.substr(1); boost::split(uriParts, strUriParams, boost::is_any_of("/")); } // throw exception in case of a empty request std::string strRequestMutable = req->ReadBody(); if (strRequestMutable.length() == 0 && uriParts.size() == 0) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } bool fInputParsed = false; bool fCheckMemPool = false; std::vector vOutPoints; // parse/deserialize input // input-format = output-format, rest/getutxos/bin requires binary input, // gives binary output, ... if (uriParts.size() > 0) { // inputs is sent over URI scheme // (/rest/getutxos/checkmempool/txid1-n/txid2-n/...) if (uriParts[0] == "checkmempool") { fCheckMemPool = true; } for (size_t i = (fCheckMemPool) ? 1 : 0; i < uriParts.size(); i++) { uint256 txid; int32_t nOutput; std::string strTxid = uriParts[i].substr(0, uriParts[i].find("-")); std::string strOutput = uriParts[i].substr(uriParts[i].find("-") + 1); if (!ParseInt32(strOutput, &nOutput) || !IsHex(strTxid)) { return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } txid.SetHex(strTxid); vOutPoints.push_back(COutPoint(txid, (uint32_t)nOutput)); } if (vOutPoints.size() > 0) { fInputParsed = true; } else { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } } switch (rf) { case RetFormat::HEX: { // convert hex to bin, continue then with bin part std::vector strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } // FALLTHROUGH case RetFormat::BINARY: { try { // deserialize only if user sent a request if (strRequestMutable.size() > 0) { // don't allow sending input over URI and HTTP RAW DATA if (fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and " "raw post data is not allowed"); } CDataStream oss(SER_NETWORK, PROTOCOL_VERSION); oss << strRequestMutable; oss >> fCheckMemPool; oss >> vOutPoints; } } catch (const std::ios_base::failure &e) { // abort in case of unreadable binary data return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } break; } case RetFormat::JSON: { if (!fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } break; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // limit max outpoints if (vOutPoints.size() > MAX_GETUTXOS_OUTPOINTS) { return RESTERR( req, HTTP_BAD_REQUEST, strprintf("Error: max outpoints exceeded (max: %d, tried: %d)", MAX_GETUTXOS_OUTPOINTS, vOutPoints.size())); } // check spentness and form a bitmap (as well as a JSON capable // human-readable string representation) std::vector bitmap; std::vector outs; std::string bitmapStringRepresentation; std::vector hits; bitmap.resize((vOutPoints.size() + 7) / 8); { LOCK2(cs_main, g_mempool.cs); CCoinsView viewDummy; CCoinsViewCache view(&viewDummy); CCoinsViewCache &viewChain = *pcoinsTip; CCoinsViewMemPool viewMempool(&viewChain, g_mempool); if (fCheckMemPool) { // switch cache backend to db+mempool in case user likes to query // mempool. 
view.SetBackend(viewMempool); } for (size_t i = 0; i < vOutPoints.size(); i++) { Coin coin; bool hit = false; if (view.GetCoin(vOutPoints[i], coin) && !g_mempool.isSpent(vOutPoints[i])) { hit = true; outs.emplace_back(std::move(coin)); } hits.push_back(hit); // form a binary string representation (human-readable for json // output) bitmapStringRepresentation.append(hit ? "1" : "0"); bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { case RetFormat::BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string ssGetUTXOResponseString = ssGetUTXOResponse.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, ssGetUTXOResponseString); return true; } case RetFormat::HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials // use more or less the same output as mentioned in Bip64 objGetUTXOResponse.pushKV("chainHeight", chainActive.Height()); objGetUTXOResponse.pushKV( "chaintipHash", chainActive.Tip()->GetBlockHash().GetHex()); objGetUTXOResponse.pushKV("bitmap", bitmapStringRepresentation); UniValue utxos(UniValue::VARR); for (const CCoin &coin : outs) { UniValue utxo(UniValue::VOBJ); utxo.pushKV("height", int32_t(coin.nHeight)); utxo.pushKV("value", ValueFromAmount(coin.out.nValue)); // include the script in a json output UniValue o(UniValue::VOBJ); ScriptPubKeyToUniv(coin.out.scriptPubKey, o, true); utxo.pushKV("scriptPubKey", o); utxos.push_back(utxo); } objGetUTXOResponse.pushKV("utxos", utxos); // return json string std::string strJSON = objGetUTXOResponse.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // not reached // continue to process further HTTP reqs on this cxn return true; } static const struct { const char *prefix; bool (*handler)(Config &config, HTTPRequest *req, const std::string &strReq); } uri_prefixes[] = { {"/rest/tx/", rest_tx}, {"/rest/block/notxdetails/", rest_block_notxdetails}, {"/rest/block/", rest_block_extended}, {"/rest/chaininfo", rest_chaininfo}, {"/rest/mempool/info", rest_mempool_info}, {"/rest/mempool/contents", rest_mempool_contents}, {"/rest/headers/", rest_headers}, {"/rest/getutxos", rest_getutxos}, }; bool StartREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { RegisterHTTPHandler(uri_prefixes[i].prefix, false, uri_prefixes[i].handler); } return true; } void InterruptREST() {} void StopREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { UnregisterHTTPHandler(uri_prefixes[i].prefix, false); } } diff --git a/src/scheduler.cpp b/src/scheduler.cpp index f34ae7f58..23945cf09 100644 --- a/src/scheduler.cpp +++ b/src/scheduler.cpp @@ -1,204 +1,205 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or 
http://www.opensource.org/licenses/mit-license.php. -#include "scheduler.h" +#include -#include "random.h" -#include "reverselock.h" +#include +#include #include + #include #include CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false) {} CScheduler::~CScheduler() { assert(nThreadsServicingQueue == 0); } void CScheduler::serviceQueue() { boost::unique_lock lock(newTaskMutex); ++nThreadsServicingQueue; // newTaskMutex is locked throughout this loop EXCEPT when the thread is // waiting or when the user's function is called. while (!shouldStop()) { try { if (!shouldStop() && taskQueue.empty()) { reverse_lock> rlock(lock); // Use this chance to get a tiny bit more entropy RandAddSeedSleep(); } while (!shouldStop() && taskQueue.empty()) { // Wait until there is something to do. newTaskScheduled.wait(lock); } // Wait until either there is a new task, or until the time of the // first item on the queue. // Some boost versions have a conflicting overload of wait_until // that returns void. Explicitly use a template here to avoid // hitting that overload. while (!shouldStop() && !taskQueue.empty()) { boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first; if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout) { // Exit loop after timeout, it means we reached the time of // the event break; } } // If there are multiple threads, the queue can empty while we're // waiting (another thread may service the task we were waiting on). if (shouldStop() || taskQueue.empty()) { continue; } Function f = taskQueue.begin()->second; taskQueue.erase(taskQueue.begin()); { // Unlock before calling f, so it can reschedule itself or // another task without deadlocking: reverse_lock> rlock(lock); f(); } } catch (...) { --nThreadsServicingQueue; throw; } } --nThreadsServicingQueue; newTaskScheduled.notify_one(); } void CScheduler::stop(bool drain) { { boost::unique_lock lock(newTaskMutex); if (drain) { stopWhenEmpty = true; } else { stopRequested = true; } } newTaskScheduled.notify_all(); } void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t) { { boost::unique_lock lock(newTaskMutex); taskQueue.insert(std::make_pair(t, f)); } newTaskScheduled.notify_one(); } void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds) { schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds)); } static void Repeat(CScheduler *s, CScheduler::Predicate p, int64_t deltaMilliSeconds) { if (p()) { s->scheduleFromNow(boost::bind(&Repeat, s, p, deltaMilliSeconds), deltaMilliSeconds); } } void CScheduler::scheduleEvery(CScheduler::Predicate p, int64_t deltaMilliSeconds) { scheduleFromNow(boost::bind(&Repeat, this, p, deltaMilliSeconds), deltaMilliSeconds); } size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first, boost::chrono::system_clock::time_point &last) const { boost::unique_lock lock(newTaskMutex); size_t result = taskQueue.size(); if (!taskQueue.empty()) { first = taskQueue.begin()->first; last = taskQueue.rbegin()->first; } return result; } bool CScheduler::AreThreadsServicingQueue() const { return nThreadsServicingQueue; } void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() { { LOCK(m_cs_callbacks_pending); // Try to avoid scheduling too many copies here, but if we // accidentally have two ProcessQueue's scheduled at once its // not a big deal. 
if (m_are_callbacks_running) return; if (m_callbacks_pending.empty()) return; } m_pscheduler->schedule( std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this)); } void SingleThreadedSchedulerClient::ProcessQueue() { std::function callback; { LOCK(m_cs_callbacks_pending); if (m_are_callbacks_running) return; if (m_callbacks_pending.empty()) return; m_are_callbacks_running = true; callback = std::move(m_callbacks_pending.front()); m_callbacks_pending.pop_front(); } // RAII the setting of fCallbacksRunning and calling // MaybeScheduleProcessQueue // to ensure both happen safely even if callback() throws. struct RAIICallbacksRunning { SingleThreadedSchedulerClient *instance; explicit RAIICallbacksRunning(SingleThreadedSchedulerClient *_instance) : instance(_instance) {} ~RAIICallbacksRunning() { { LOCK(instance->m_cs_callbacks_pending); instance->m_are_callbacks_running = false; } instance->MaybeScheduleProcessQueue(); } } raiicallbacksrunning(this); callback(); } void SingleThreadedSchedulerClient::AddToProcessQueue( std::function func) { assert(m_pscheduler); { LOCK(m_cs_callbacks_pending); m_callbacks_pending.emplace_back(std::move(func)); } MaybeScheduleProcessQueue(); } void SingleThreadedSchedulerClient::EmptyQueue() { assert(!m_pscheduler->AreThreadsServicingQueue()); bool should_continue = true; while (should_continue) { ProcessQueue(); LOCK(m_cs_callbacks_pending); should_continue = !m_callbacks_pending.empty(); } } size_t SingleThreadedSchedulerClient::CallbacksPending() { LOCK(m_cs_callbacks_pending); return m_callbacks_pending.size(); } diff --git a/src/scheduler.h b/src/scheduler.h index 86700c92d..202963784 100644 --- a/src/scheduler.h +++ b/src/scheduler.h @@ -1,121 +1,121 @@ // Copyright (c) 2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SCHEDULER_H #define BITCOIN_SCHEDULER_H -#include "sync.h" +#include // // NOTE: // boost::thread / boost::chrono should be ported to // std::thread / std::chrono when we support C++11. // #include #include #include // // Simple class for background tasks that should be run periodically or once // "after a while" // // Usage: // // CScheduler* s = new CScheduler(); // s->scheduleFromNow(doSomething, 11); // Assuming a: void doSomething() { } // s->scheduleFromNow(std::bind(Class::func, this, argument), 3); // boost::thread* t = new boost::thread(boost::bind(CScheduler::serviceQueue, // s)); // // ... then at program shutdown, clean up the thread running serviceQueue: // t->interrupt(); // t->join(); // delete t; // delete s; // Must be done after thread is interrupted/joined. // class CScheduler { public: CScheduler(); ~CScheduler(); typedef std::function Function; typedef std::function Predicate; // Call func at/after time t void schedule(Function f, boost::chrono::system_clock::time_point t = boost::chrono::system_clock::now()); // Convenience method: call f once deltaMilliSeconds from now void scheduleFromNow(Function f, int64_t deltaMilliSeconds); // Another convenience method: call f approximately every deltaMilliSeconds // forever, starting deltaMilliSeconds from now. To be more precise: every // time f is finished, it is rescheduled to run deltaMilliSeconds later. If // you need more accurate scheduling, don't use this method. void scheduleEvery(Predicate p, int64_t deltaMilliSeconds); // To keep things as simple as possible, there is no unschedule. // Services the queue 'forever'. 
Should be run in a thread, and interrupted // using boost::interrupt_thread void serviceQueue(); // Tell any threads running serviceQueue to stop as soon as they're done // servicing whatever task they're currently servicing (drain=false) or when // there is no work left to be done (drain=true) void stop(bool drain = false); // Returns number of tasks waiting to be serviced, and first and last task // times size_t getQueueInfo(boost::chrono::system_clock::time_point &first, boost::chrono::system_clock::time_point &last) const; // Returns true if there are threads actively running in serviceQueue() bool AreThreadsServicingQueue() const; private: std::multimap taskQueue; boost::condition_variable newTaskScheduled; mutable boost::mutex newTaskMutex; int nThreadsServicingQueue; bool stopRequested; bool stopWhenEmpty; bool shouldStop() const { return stopRequested || (stopWhenEmpty && taskQueue.empty()); } }; /** * Class used by CScheduler clients which may schedule multiple jobs * which are required to be run serially. Does not require such jobs * to be executed on the same thread, but no two jobs will be executed * at the same time. */ class SingleThreadedSchedulerClient { private: CScheduler *m_pscheduler; CCriticalSection m_cs_callbacks_pending; std::list> m_callbacks_pending; bool m_are_callbacks_running = false; void MaybeScheduleProcessQueue(); void ProcessQueue(); public: explicit SingleThreadedSchedulerClient(CScheduler *pschedulerIn) : m_pscheduler(pschedulerIn) {} void AddToProcessQueue(std::function func); // Processes all remaining queue members on the calling thread, blocking // until queue is empty // Must be called after the CScheduler has no remaining processing threads! void EmptyQueue(); size_t CallbacksPending(); }; #endif diff --git a/src/serialize.h b/src/serialize.h index d4e8abf53..ef66f4557 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -1,919 +1,918 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SERIALIZE_H #define BITCOIN_SERIALIZE_H -#include "compat/endian.h" +#include +#include #include #include #include #include #include #include #include #include #include #include #include #include -#include "prevector.h" - static const uint64_t MAX_SIZE = 0x02000000; /** * Dummy data type to identify deserializing constructors. * * By convention, a constructor of a type T with signature * * template T::T(deserialize_type, Stream& s) * * is a deserializing constructor, which builds the type by deserializing it * from s. If T contains const fields, this is likely the only way to do so. */ struct deserialize_type {}; constexpr deserialize_type deserialize{}; /** * Used to bypass the rule against non-const reference to temporary where it * makes sense with wrappers such as CFlatData or CTxDB */ template inline T &REF(const T &val) { return const_cast(val); } /** * Used to acquire a non-const pointer "this" to generate bodies of const * serialization operations from a template */ template inline T *NCONST_PTR(const T *val) { return const_cast(val); } /* * Lowest-level serialization and conversion. 
* @note Sizes of these types are verified in the tests */ template inline void ser_writedata8(Stream &s, uint8_t obj) { s.write((char *)&obj, 1); } template inline void ser_writedata16(Stream &s, uint16_t obj) { obj = htole16(obj); s.write((char *)&obj, 2); } template inline void ser_writedata32(Stream &s, uint32_t obj) { obj = htole32(obj); s.write((char *)&obj, 4); } template inline void ser_writedata64(Stream &s, uint64_t obj) { obj = htole64(obj); s.write((char *)&obj, 8); } template inline uint8_t ser_readdata8(Stream &s) { uint8_t obj; s.read((char *)&obj, 1); return obj; } template inline uint16_t ser_readdata16(Stream &s) { uint16_t obj; s.read((char *)&obj, 2); return le16toh(obj); } template inline uint32_t ser_readdata32(Stream &s) { uint32_t obj; s.read((char *)&obj, 4); return le32toh(obj); } template inline uint64_t ser_readdata64(Stream &s) { uint64_t obj; s.read((char *)&obj, 8); return le64toh(obj); } inline uint64_t ser_double_to_uint64(double x) { union { double x; uint64_t y; } tmp; tmp.x = x; return tmp.y; } inline uint32_t ser_float_to_uint32(float x) { union { float x; uint32_t y; } tmp; tmp.x = x; return tmp.y; } inline double ser_uint64_to_double(uint64_t y) { union { double x; uint64_t y; } tmp; tmp.y = y; return tmp.x; } inline float ser_uint32_to_float(uint32_t y) { union { float x; uint32_t y; } tmp; tmp.y = y; return tmp.x; } ///////////////////////////////////////////////////////////////// // // Templates for serializing to anything that looks like a stream, // i.e. anything that supports .read(char*, size_t) and .write(char*, size_t) // class CSizeComputer; enum { // primary actions SER_NETWORK = (1 << 0), SER_DISK = (1 << 1), SER_GETHASH = (1 << 2), }; #define READWRITE(obj) (::SerReadWrite(s, (obj), ser_action)) #define READWRITEMANY(...) (::SerReadWriteMany(s, ser_action, __VA_ARGS__)) /** * Implement three methods for serializable objects. These are actually wrappers * over "SerializationOp" template, which implements the body of each class' * serialization code. Adding "ADD_SERIALIZE_METHODS" in the body of the class * causes these wrappers to be added as members. 
*/ #define ADD_SERIALIZE_METHODS \ template void Serialize(Stream &s) const { \ NCONST_PTR(this)->SerializationOp(s, CSerActionSerialize()); \ } \ template void Unserialize(Stream &s) { \ SerializationOp(s, CSerActionUnserialize()); \ } template inline void Serialize(Stream &s, char a) { ser_writedata8(s, a); } // TODO Get rid of bare char template inline void Serialize(Stream &s, int8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, uint8_t a) { ser_writedata8(s, a); } template inline void Serialize(Stream &s, int16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, uint16_t a) { ser_writedata16(s, a); } template inline void Serialize(Stream &s, int32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, uint32_t a) { ser_writedata32(s, a); } template inline void Serialize(Stream &s, int64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, uint64_t a) { ser_writedata64(s, a); } template inline void Serialize(Stream &s, float a) { ser_writedata32(s, ser_float_to_uint32(a)); } template inline void Serialize(Stream &s, double a) { ser_writedata64(s, ser_double_to_uint64(a)); } // TODO Get rid of bare char template inline void Unserialize(Stream &s, char &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, int8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, uint8_t &a) { a = ser_readdata8(s); } template inline void Unserialize(Stream &s, int16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, uint16_t &a) { a = ser_readdata16(s); } template inline void Unserialize(Stream &s, int32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, uint32_t &a) { a = ser_readdata32(s); } template inline void Unserialize(Stream &s, int64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, uint64_t &a) { a = ser_readdata64(s); } template inline void Unserialize(Stream &s, float &a) { a = ser_uint32_to_float(ser_readdata32(s)); } template inline void Unserialize(Stream &s, double &a) { a = ser_uint64_to_double(ser_readdata64(s)); } template inline void Serialize(Stream &s, bool a) { char f = a; ser_writedata8(s, f); } template inline void Unserialize(Stream &s, bool &a) { char f = ser_readdata8(s); a = f; } /** * Compact Size * size < 253 -- 1 byte * size <= USHRT_MAX -- 3 bytes (253 + 2 bytes) * size <= UINT_MAX -- 5 bytes (254 + 4 bytes) * size > UINT_MAX -- 9 bytes (255 + 8 bytes) */ inline uint32_t GetSizeOfCompactSize(uint64_t nSize) { if (nSize < 253) { return sizeof(uint8_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint16_t); } if (nSize <= std::numeric_limits::max()) { return sizeof(uint8_t) + sizeof(uint32_t); } return sizeof(uint8_t) + sizeof(uint64_t); } inline void WriteCompactSize(CSizeComputer &os, uint64_t nSize); template void WriteCompactSize(Stream &os, uint64_t nSize) { if (nSize < 253) { ser_writedata8(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 253); ser_writedata16(os, nSize); } else if (nSize <= std::numeric_limits::max()) { ser_writedata8(os, 254); ser_writedata32(os, nSize); } else { ser_writedata8(os, 255); ser_writedata64(os, nSize); } return; } template uint64_t ReadCompactSize(Stream &is) { uint8_t chSize = ser_readdata8(is); uint64_t nSizeRet = 0; if (chSize < 253) { nSizeRet = chSize; } else if (chSize == 253) { nSizeRet = ser_readdata16(is); if (nSizeRet < 253) { throw 
std::ios_base::failure("non-canonical ReadCompactSize()"); } } else if (chSize == 254) { nSizeRet = ser_readdata32(is); if (nSizeRet < 0x10000u) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } else { nSizeRet = ser_readdata64(is); if (nSizeRet < 0x100000000ULL) { throw std::ios_base::failure("non-canonical ReadCompactSize()"); } } if (nSizeRet > MAX_SIZE) { throw std::ios_base::failure("ReadCompactSize(): size too large"); } return nSizeRet; } /** * Variable-length integers: bytes are a MSB base-128 encoding of the number. * The high bit in each byte signifies whether another digit follows. To make * sure the encoding is one-to-one, one is subtracted from all but the last * digit. Thus, the byte sequence a[] with length len, where all but the last * byte has bit 128 set, encodes the number: * * (a[len-1] & 0x7F) + sum(i=1..len-1, 128^i*((a[len-i-1] & 0x7F)+1)) * * Properties: * * Very small (0-127: 1 byte, 128-16511: 2 bytes, 16512-2113663: 3 bytes) * * Every integer has exactly one encoding * * Encoding does not depend on size of original integer type * * No redundancy: every (infinite) byte sequence corresponds to a list * of encoded integers. * * 0: [0x00] 256: [0x81 0x00] * 1: [0x01] 16383: [0xFE 0x7F] * 127: [0x7F] 16384: [0xFF 0x00] * 128: [0x80 0x00] 16511: [0xFF 0x7F] * 255: [0x80 0x7F] 65535: [0x82 0xFE 0x7F] * 2^32: [0x8E 0xFE 0xFE 0xFF 0x00] */ template inline unsigned int GetSizeOfVarInt(I n) { int nRet = 0; while (true) { nRet++; if (n <= 0x7F) { return nRet; } n = (n >> 7) - 1; } } template inline void WriteVarInt(CSizeComputer &os, I n); template void WriteVarInt(Stream &os, I n) { uint8_t tmp[(sizeof(n) * 8 + 6) / 7]; int len = 0; while (true) { tmp[len] = (n & 0x7F) | (len ? 0x80 : 0x00); if (n <= 0x7F) { break; } n = (n >> 7) - 1; len++; } do { ser_writedata8(os, tmp[len]); } while (len--); } template I ReadVarInt(Stream &is) { I n = 0; while (true) { uint8_t chData = ser_readdata8(is); n = (n << 7) | (chData & 0x7F); if ((chData & 0x80) == 0) { return n; } n++; } } #define FLATDATA(obj) \ REF(CFlatData((char *)&(obj), (char *)&(obj) + sizeof(obj))) #define VARINT(obj) REF(WrapVarInt(REF(obj))) #define COMPACTSIZE(obj) REF(CCompactSize(REF(obj))) #define LIMITED_STRING(obj, n) REF(LimitedString(REF(obj))) /** * Wrapper for serializing arrays and POD. 
*/ class CFlatData { protected: char *pbegin; char *pend; public: CFlatData(void *pbeginIn, void *pendIn) : pbegin((char *)pbeginIn), pend((char *)pendIn) {} template explicit CFlatData(std::vector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } template explicit CFlatData(prevector &v) { pbegin = (char *)v.data(); pend = (char *)(v.data() + v.size()); } char *begin() { return pbegin; } const char *begin() const { return pbegin; } char *end() { return pend; } const char *end() const { return pend; } template void Serialize(Stream &s) const { s.write(pbegin, pend - pbegin); } template void Unserialize(Stream &s) { s.read(pbegin, pend - pbegin); } }; template class CVarInt { protected: I &n; public: explicit CVarInt(I &nIn) : n(nIn) {} template void Serialize(Stream &s) const { WriteVarInt(s, n); } template void Unserialize(Stream &s) { n = ReadVarInt(s); } }; class CCompactSize { protected: uint64_t &n; public: explicit CCompactSize(uint64_t &nIn) : n(nIn) {} template void Serialize(Stream &s) const { WriteCompactSize(s, n); } template void Unserialize(Stream &s) { n = ReadCompactSize(s); } }; template class LimitedString { protected: std::string &string; public: explicit LimitedString(std::string &_string) : string(_string) {} template void Unserialize(Stream &s) { size_t size = ReadCompactSize(s); if (size > Limit) { throw std::ios_base::failure("String length limit exceeded"); } string.resize(size); if (size != 0) { s.read((char *)&string[0], size); } } template void Serialize(Stream &s) const { WriteCompactSize(s, string.size()); if (!string.empty()) { s.write((char *)&string[0], string.size()); } } }; template CVarInt WrapVarInt(I &n) { return CVarInt(n); } /** * Forward declarations */ /** * string */ template void Serialize(Stream &os, const std::basic_string &str); template void Unserialize(Stream &is, std::basic_string &str); /** * prevector * prevectors of uint8_t are a special case and are intended to be serialized as * a single opaque blob. */ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &); template void Serialize_impl(Stream &os, const prevector &v, const V &); template inline void Serialize(Stream &os, const prevector &v); template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &); template void Unserialize_impl(Stream &is, prevector &v, const V &); template inline void Unserialize(Stream &is, prevector &v); /** * vector * vectors of uint8_t are a special case and are intended to be serialized as a * single opaque blob. 
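// A usage sketch (not from the codebase) for the wrappers declared above,
// assuming <serialize.h>/<streams.h> and the usual READWRITE() macro that
// forwards to the ser_action machinery defined elsewhere in serialize.h.
// The struct and field names are illustrative.
#include <serialize.h>
#include <streams.h>
#include <cstdint>
#include <string>

struct ExampleRecord {
    uint64_t nCount = 0;   // stored as a VarInt: one byte for small values
    uint64_t nBytes = 0;   // stored as a CompactSize
    std::string strLabel;  // length-checked while deserializing

    ADD_SERIALIZE_METHODS;

    template <typename Stream, typename Operation>
    inline void SerializationOp(Stream &s, Operation ser_action) {
        READWRITE(VARINT(nCount));
        READWRITE(COMPACTSIZE(nBytes));
        // Reject labels longer than 64 bytes on the read path.
        READWRITE(LIMITED_STRING(strLabel, 64));
    }
};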
*/ template void Serialize_impl(Stream &os, const std::vector &v, const uint8_t &); template void Serialize_impl(Stream &os, const std::vector &v, const V &); template inline void Serialize(Stream &os, const std::vector &v); template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &); template void Unserialize_impl(Stream &is, std::vector &v, const V &); template inline void Unserialize(Stream &is, std::vector &v); /** * pair */ template void Serialize(Stream &os, const std::pair &item); template void Unserialize(Stream &is, std::pair &item); /** * map */ template void Serialize(Stream &os, const std::map &m); template void Unserialize(Stream &is, std::map &m); /** * set */ template void Serialize(Stream &os, const std::set &m); template void Unserialize(Stream &is, std::set &m); /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p); template void Unserialize(Stream &os, std::shared_ptr &p); /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p); template void Unserialize(Stream &os, std::unique_ptr &p); /** * If none of the specialized versions above matched, default to calling member * function. */ template inline void Serialize(Stream &os, const T &a) { a.Serialize(os); } template inline void Unserialize(Stream &is, T &a) { a.Unserialize(is); } /** * string */ template void Serialize(Stream &os, const std::basic_string &str) { WriteCompactSize(os, str.size()); if (!str.empty()) { os.write((char *)&str[0], str.size() * sizeof(str[0])); } } template void Unserialize(Stream &is, std::basic_string &str) { size_t nSize = ReadCompactSize(is); str.resize(nSize); if (nSize != 0) { is.read((char *)&str[0], nSize * sizeof(str[0])); } } /** * prevector */ template void Serialize_impl(Stream &os, const prevector &v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)&v[0], v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const prevector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const prevector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, prevector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, prevector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, prevector &v) { Unserialize_impl(is, v, T()); } /** * vector */ template void Serialize_impl(Stream &os, const std::vector &v, const uint8_t &) { WriteCompactSize(os, v.size()); if (!v.empty()) { os.write((char *)&v[0], v.size() * sizeof(T)); } } template void Serialize_impl(Stream &os, const std::vector &v, const V &) { WriteCompactSize(os, v.size()); for (const T &i : v) { ::Serialize(os, i); } } template inline void Serialize(Stream &os, const std::vector &v) { Serialize_impl(os, v, T()); } template void Unserialize_impl(Stream &is, std::vector &v, const uint8_t &) { // Limit size per read so bogus size value won't cause out of memory v.clear(); 
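// Standalone sketch of the incremental-growth guard used by the
// Unserialize_impl overloads here: the declared element count is read first,
// but the vector is only grown by roughly 5 MB per step, so a bogus
// CompactSize fails with an end-of-data error instead of one huge
// allocation. ByteSource and ReadChunked are illustrative names.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

struct ByteSource {
    const std::vector<uint8_t> &buf;
    size_t pos;
    explicit ByteSource(const std::vector<uint8_t> &b) : buf(b), pos(0) {}
    void read(uint8_t *dst, size_t n) {
        if (pos + n > buf.size()) throw std::runtime_error("end of data");
        std::memcpy(dst, buf.data() + pos, n);
        pos += n;
    }
};

static std::vector<uint8_t> ReadChunked(ByteSource &is, size_t nSize) {
    std::vector<uint8_t> v;
    size_t i = 0;
    while (i < nSize) {
        // Grow by at most 5,000,000 bytes per iteration.
        size_t blk = std::min(nSize - i, size_t(5000000));
        v.resize(i + blk);
        is.read(v.data() + i, blk);
        i += blk;
    }
    return v;
}

int main() {
    std::vector<uint8_t> data(16, 0xab);
    ByteSource src(data);
    // A claimed size of 1 GiB fails after the first ~5 MB step, long before
    // the full gigabyte would have been allocated.
    try {
        ReadChunked(src, size_t(1) << 30);
    } catch (const std::runtime_error &) {
        // expected: the source runs out of data
    }
    return 0;
}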
size_t nSize = ReadCompactSize(is); size_t i = 0; while (i < nSize) { size_t blk = std::min(nSize - i, size_t(1 + 4999999 / sizeof(T))); v.resize(i + blk); is.read((char *)&v[i], blk * sizeof(T)); i += blk; } } template void Unserialize_impl(Stream &is, std::vector &v, const V &) { v.clear(); size_t nSize = ReadCompactSize(is); size_t i = 0; size_t nMid = 0; while (nMid < nSize) { nMid += 5000000 / sizeof(T); if (nMid > nSize) { nMid = nSize; } v.resize(nMid); for (; i < nMid; i++) { Unserialize(is, v[i]); } } } template inline void Unserialize(Stream &is, std::vector &v) { Unserialize_impl(is, v, T()); } /** * pair */ template void Serialize(Stream &os, const std::pair &item) { Serialize(os, item.first); Serialize(os, item.second); } template void Unserialize(Stream &is, std::pair &item) { Unserialize(is, item.first); Unserialize(is, item.second); } /** * map */ template void Serialize(Stream &os, const std::map &m) { WriteCompactSize(os, m.size()); for (const auto &entry : m) { Serialize(os, entry); } } template void Unserialize(Stream &is, std::map &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::map::iterator mi = m.begin(); for (size_t i = 0; i < nSize; i++) { std::pair item; Unserialize(is, item); mi = m.insert(mi, item); } } /** * set */ template void Serialize(Stream &os, const std::set &m) { WriteCompactSize(os, m.size()); for (const K &i : m) { Serialize(os, i); } } template void Unserialize(Stream &is, std::set &m) { m.clear(); size_t nSize = ReadCompactSize(is); typename std::set::iterator it = m.begin(); for (size_t i = 0; i < nSize; i++) { K key; Unserialize(is, key); it = m.insert(it, key); } } /** * unique_ptr */ template void Serialize(Stream &os, const std::unique_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::unique_ptr &p) { p.reset(new T(deserialize, is)); } /** * shared_ptr */ template void Serialize(Stream &os, const std::shared_ptr &p) { Serialize(os, *p); } template void Unserialize(Stream &is, std::shared_ptr &p) { p = std::make_shared(deserialize, is); } /** * Support for ADD_SERIALIZE_METHODS and READWRITE macro */ struct CSerActionSerialize { constexpr bool ForRead() const { return false; } }; struct CSerActionUnserialize { constexpr bool ForRead() const { return true; } }; template inline void SerReadWrite(Stream &s, const T &obj, CSerActionSerialize ser_action) { ::Serialize(s, obj); } template inline void SerReadWrite(Stream &s, T &obj, CSerActionUnserialize ser_action) { ::Unserialize(s, obj); } /** * ::GetSerializeSize implementations * * Computing the serialized size of objects is done through a special stream * object of type CSizeComputer, which only records the number of bytes written * to it. * * If your Serialize or SerializationOp method has non-trivial overhead for * serialization, it may be worthwhile to implement a specialized version for * CSizeComputer, which uses the s.seek() method to record bytes that would * be written instead. */ class CSizeComputer { protected: size_t nSize; const int nType; const int nVersion; public: CSizeComputer(int nTypeIn, int nVersionIn) : nSize(0), nType(nTypeIn), nVersion(nVersionIn) {} void write(const char *psz, size_t _nSize) { this->nSize += _nSize; } /** Pretend _nSize bytes are written, without specifying them. 
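// Sketch of the constructor shape required by the std::unique_ptr /
// std::shared_ptr overloads above: the pointee is constructed directly from
// the stream via a "deserialize" tag constructor. Assumes the
// deserialize_type tag declared earlier in serialize.h; ExampleBlob is an
// illustrative type, not part of the codebase.
#include <serialize.h>
#include <cstdint>
#include <memory>
#include <vector>

class ExampleBlob {
    std::vector<uint8_t> data;

public:
    ExampleBlob() = default;

    // Construct by deserializing straight from a stream.
    template <typename Stream> ExampleBlob(deserialize_type, Stream &s) {
        Unserialize(s);
    }

    template <typename Stream> void Serialize(Stream &s) const {
        ::Serialize(s, data);
    }
    template <typename Stream> void Unserialize(Stream &s) {
        ::Unserialize(s, data);
    }
};

// With that constructor in place, a smart-pointer member round-trips with the
// generic overloads above, e.g.:
//   std::shared_ptr<const ExampleBlob> p;
//   stream >> p;  // calls std::make_shared<const ExampleBlob>(deserialize, stream)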
*/ void seek(size_t _nSize) { this->nSize += _nSize; } template CSizeComputer &operator<<(const T &obj) { ::Serialize(*this, obj); return (*this); } size_t size() const { return nSize; } int GetVersion() const { return nVersion; } int GetType() const { return nType; } }; template void SerializeMany(Stream &s) {} template void SerializeMany(Stream &s, Arg &&arg) { ::Serialize(s, std::forward(arg)); } template void SerializeMany(Stream &s, Arg &&arg, Args &&... args) { ::Serialize(s, std::forward(arg)); ::SerializeMany(s, std::forward(args)...); } template inline void UnserializeMany(Stream &s) {} template inline void UnserializeMany(Stream &s, Arg &arg) { ::Unserialize(s, arg); } template inline void UnserializeMany(Stream &s, Arg &arg, Args &... args) { ::Unserialize(s, arg); ::UnserializeMany(s, args...); } template inline void SerReadWriteMany(Stream &s, CSerActionSerialize ser_action, Args &&... args) { ::SerializeMany(s, std::forward(args)...); } template inline void SerReadWriteMany(Stream &s, CSerActionUnserialize ser_action, Args &... args) { ::UnserializeMany(s, args...); } template inline void WriteVarInt(CSizeComputer &s, I n) { s.seek(GetSizeOfVarInt(n)); } inline void WriteCompactSize(CSizeComputer &s, uint64_t nSize) { s.seek(GetSizeOfCompactSize(nSize)); } template size_t GetSerializeSize(const T &t, int nType, int nVersion = 0) { return (CSizeComputer(nType, nVersion) << t).size(); } template size_t GetSerializeSize(const S &s, const T &t) { return (CSizeComputer(s.GetType(), s.GetVersion()) << t).size(); } #endif // BITCOIN_SERIALIZE_H diff --git a/src/streams.h b/src/streams.h index 4de5516c9..7d3d84cf9 100644 --- a/src/streams.h +++ b/src/streams.h @@ -1,820 +1,820 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_STREAMS_H #define BITCOIN_STREAMS_H -#include "serialize.h" -#include "support/allocators/zeroafterfree.h" +#include +#include #include #include #include #include #include #include #include #include #include #include #include #include template class OverrideStream { Stream *stream; const int nType; const int nVersion; public: OverrideStream(Stream *stream_, int nType_, int nVersion_) : stream(stream_), nType(nType_), nVersion(nVersion_) {} template OverrideStream &operator<<(const T &obj) { // Serialize to this stream ::Serialize(*this, obj); return (*this); } template OverrideStream &operator>>(T &obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } void write(const char *pch, size_t nSize) { stream->write(pch, nSize); } void read(char *pch, size_t nSize) { stream->read(pch, nSize); } int GetVersion() const { return nVersion; } int GetType() const { return nType; } }; template OverrideStream WithOrVersion(S *s, int nVersionFlag) { return OverrideStream(s, s->GetType(), s->GetVersion() | nVersionFlag); } /** * Minimal stream for overwriting and/or appending to an existing byte vector. * * The referenced vector will grow as necessary. */ class CVectorWriter { public: /** * @param[in] nTypeIn Serialization Type * @param[in] nVersionIn Serialization Version (including any flags) * @param[in] vchDataIn Referenced byte vector to overwrite/append * @param[in] nPosIn Starting position. Vector index where writes should * start. The vector will initially grow as necessary to max(index, * vec.size()). So to append, use vec.size(). 
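// Usage sketch for the size-computation path above: ::GetSerializeSize runs
// the ordinary Serialize() code against a CSizeComputer, which only counts
// bytes (and lets WriteVarInt/WriteCompactSize skip the byte emission via
// seek()). Assumes <serialize.h>; SER_NETWORK and PROTOCOL_VERSION are the
// usual constants defined elsewhere in the tree.
#include <serialize.h>
#include <version.h>
#include <cstdint>
#include <vector>

size_t ExampleSizes() {
    std::vector<uint8_t> payload(1000, 0x00);

    // CompactSize(1000) is 3 bytes, plus 1000 payload bytes = 1003.
    size_t vec_size = GetSerializeSize(payload, SER_NETWORK, PROTOCOL_VERSION);

    // The same computation, written out against the stream type directly.
    CSizeComputer counter(SER_NETWORK, PROTOCOL_VERSION);
    counter << payload;

    return vec_size + counter.size(); // 1003 + 1003
}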
*/ CVectorWriter(int nTypeIn, int nVersionIn, std::vector &vchDataIn, size_t nPosIn) : nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn), nPos(nPosIn) { if (nPos > vchData.size()) vchData.resize(nPos); } /** * (other params same as above) * @param[in] args A list of items to serialize starting at nPos. */ template CVectorWriter(int nTypeIn, int nVersionIn, std::vector &vchDataIn, size_t nPosIn, Args &&... args) : CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn) { ::SerializeMany(*this, std::forward(args)...); } void write(const char *pch, size_t nSize) { assert(nPos <= vchData.size()); size_t nOverwrite = std::min(nSize, vchData.size() - nPos); if (nOverwrite) { memcpy(vchData.data() + nPos, reinterpret_cast(pch), nOverwrite); } if (nOverwrite < nSize) { vchData.insert(vchData.end(), reinterpret_cast(pch) + nOverwrite, reinterpret_cast(pch) + nSize); } nPos += nSize; } template CVectorWriter &operator<<(const T &obj) { // Serialize to this stream ::Serialize(*this, obj); return (*this); } int GetVersion() const { return nVersion; } int GetType() const { return nType; } void seek(size_t nSize) { nPos += nSize; if (nPos > vchData.size()) vchData.resize(nPos); } private: const int nType; const int nVersion; std::vector &vchData; size_t nPos; }; /** * Minimal stream for reading from an existing vector by reference */ class VectorReader { private: const int m_type; const int m_version; const std::vector &m_data; size_t m_pos = 0; public: /** * @param[in] type Serialization Type * @param[in] version Serialization Version (including any flags) * @param[in] data Referenced byte vector to overwrite/append * @param[in] pos Starting position. Vector index where reads should start. */ VectorReader(int type, int version, const std::vector &data, size_t pos) : m_type(type), m_version(version), m_data(data), m_pos(pos) { if (m_pos > m_data.size()) { throw std::ios_base::failure( "VectorReader(...): end of data (m_pos > m_data.size())"); } } /** * (other params same as above) * @param[in] args A list of items to deserialize starting at pos. */ template VectorReader(int type, int version, const std::vector &data, size_t pos, Args &&... args) : VectorReader(type, version, data, pos) { ::UnserializeMany(*this, std::forward(args)...); } template VectorReader &operator>>(T &obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } int GetVersion() const { return m_version; } int GetType() const { return m_type; } size_t size() const { return m_data.size() - m_pos; } bool empty() const { return m_data.size() == m_pos; } void read(char *dst, size_t n) { if (n == 0) { return; } // Read from the beginning of the buffer size_t pos_next = m_pos + n; if (pos_next > m_data.size()) { throw std::ios_base::failure("VectorReader::read(): end of data"); } memcpy(dst, m_data.data() + m_pos, n); m_pos = pos_next; } }; /** * Double ended buffer combining vector and stream-like interfaces. * * >> and << read and write unformatted data using the above serialization * templates. Fills with data in linear time; some stringstream implementations * take N^2 time. 
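// Round-trip sketch for the two minimal vector streams above: CVectorWriter
// appends (or overwrites) serialized data in an existing byte vector, and
// VectorReader deserializes from it without copying. Assumes <streams.h>;
// SER_NETWORK / PROTOCOL_VERSION are the usual constants from the tree.
#include <streams.h>
#include <version.h>
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

void VectorStreamRoundTrip() {
    std::vector<uint8_t> buf;

    // Passing pos == buf.size() appends at the end of the vector.
    CVectorWriter writer(SER_NETWORK, PROTOCOL_VERSION, buf, buf.size());
    writer << uint32_t(0xdeadbeef) << std::string("hello");

    // Read the values back, starting from position 0.
    VectorReader reader(SER_NETWORK, PROTOCOL_VERSION, buf, 0);
    uint32_t n = 0;
    std::string s;
    reader >> n >> s;

    assert(n == 0xdeadbeef);
    assert(s == "hello");
}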
*/ class CDataStream { protected: typedef CSerializeData vector_type; vector_type vch; unsigned int nReadPos; int nType; int nVersion; public: typedef vector_type::allocator_type allocator_type; typedef vector_type::size_type size_type; typedef vector_type::difference_type difference_type; typedef vector_type::reference reference; typedef vector_type::const_reference const_reference; typedef vector_type::value_type value_type; typedef vector_type::iterator iterator; typedef vector_type::const_iterator const_iterator; typedef vector_type::reverse_iterator reverse_iterator; explicit CDataStream(int nTypeIn, int nVersionIn) { Init(nTypeIn, nVersionIn); } CDataStream(const_iterator pbegin, const_iterator pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend) { Init(nTypeIn, nVersionIn); } CDataStream(const char *pbegin, const char *pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend) { Init(nTypeIn, nVersionIn); } CDataStream(const vector_type &vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) { Init(nTypeIn, nVersionIn); } CDataStream(const std::vector &vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) { Init(nTypeIn, nVersionIn); } CDataStream(const std::vector &vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) { Init(nTypeIn, nVersionIn); } template CDataStream(int nTypeIn, int nVersionIn, Args &&... args) { Init(nTypeIn, nVersionIn); ::SerializeMany(*this, std::forward(args)...); } void Init(int nTypeIn, int nVersionIn) { nReadPos = 0; nType = nTypeIn; nVersion = nVersionIn; } CDataStream &operator+=(const CDataStream &b) { vch.insert(vch.end(), b.begin(), b.end()); return *this; } friend CDataStream operator+(const CDataStream &a, const CDataStream &b) { CDataStream ret = a; ret += b; return (ret); } std::string str() const { return (std::string(begin(), end())); } // // Vector subset // const_iterator begin() const { return vch.begin() + nReadPos; } iterator begin() { return vch.begin() + nReadPos; } const_iterator end() const { return vch.end(); } iterator end() { return vch.end(); } size_type size() const { return vch.size() - nReadPos; } bool empty() const { return vch.size() == nReadPos; } void resize(size_type n, value_type c = 0) { vch.resize(n + nReadPos, c); } void reserve(size_type n) { vch.reserve(n + nReadPos); } const_reference operator[](size_type pos) const { return vch[pos + nReadPos]; } reference operator[](size_type pos) { return vch[pos + nReadPos]; } void clear() { vch.clear(); nReadPos = 0; } iterator insert(iterator it, const char x = char()) { return vch.insert(it, x); } void insert(iterator it, size_type n, const char x) { vch.insert(it, n, x); } value_type *data() { return vch.data() + nReadPos; } const value_type *data() const { return vch.data() + nReadPos; } void insert(iterator it, std::vector::const_iterator first, std::vector::const_iterator last) { if (last == first) { return; } assert(last - first > 0); if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos) { // special case for inserting at the front when there's room nReadPos -= (last - first); memcpy(&vch[nReadPos], &first[0], last - first); } else { vch.insert(it, first, last); } } void insert(iterator it, const char *first, const char *last) { if (last == first) { return; } assert(last - first > 0); if (it == vch.begin() + nReadPos && (unsigned int)(last - first) <= nReadPos) { // special case for inserting at the front when there's room nReadPos -= (last - first); memcpy(&vch[nReadPos], &first[0], last - first); } 
else { vch.insert(it, first, last); } } iterator erase(iterator it) { if (it == vch.begin() + nReadPos) { // special case for erasing from the front if (++nReadPos >= vch.size()) { // whenever we reach the end, we take the opportunity to clear // the buffer nReadPos = 0; return vch.erase(vch.begin(), vch.end()); } return vch.begin() + nReadPos; } else { return vch.erase(it); } } iterator erase(iterator first, iterator last) { if (first == vch.begin() + nReadPos) { // special case for erasing from the front if (last == vch.end()) { nReadPos = 0; return vch.erase(vch.begin(), vch.end()); } else { nReadPos = (last - vch.begin()); return last; } } else return vch.erase(first, last); } inline void Compact() { vch.erase(vch.begin(), vch.begin() + nReadPos); nReadPos = 0; } bool Rewind(size_type n) { // Rewind by n characters if the buffer hasn't been compacted yet if (n > nReadPos) return false; nReadPos -= n; return true; } // // Stream subset // bool eof() const { return size() == 0; } CDataStream *rdbuf() { return this; } int in_avail() const { return size(); } void SetType(int n) { nType = n; } int GetType() const { return nType; } void SetVersion(int n) { nVersion = n; } int GetVersion() const { return nVersion; } void read(char *pch, size_t nSize) { if (nSize == 0) { return; } // Read from the beginning of the buffer unsigned int nReadPosNext = nReadPos + nSize; if (nReadPosNext > vch.size()) { throw std::ios_base::failure("CDataStream::read(): end of data"); } memcpy(pch, &vch[nReadPos], nSize); if (nReadPosNext == vch.size()) { nReadPos = 0; vch.clear(); return; } nReadPos = nReadPosNext; } void ignore(int nSize) { // Ignore from the beginning of the buffer if (nSize < 0) { throw std::ios_base::failure( "CDataStream::ignore(): nSize negative"); } unsigned int nReadPosNext = nReadPos + nSize; if (nReadPosNext >= vch.size()) { if (nReadPosNext > vch.size()) throw std::ios_base::failure( "CDataStream::ignore(): end of data"); nReadPos = 0; vch.clear(); return; } nReadPos = nReadPosNext; } void write(const char *pch, size_t nSize) { // Write to the end of the buffer vch.insert(vch.end(), pch, pch + nSize); } template void Serialize(Stream &s) const { // Special case: stream << stream concatenates like stream += stream if (!vch.empty()) s.write((char *)&vch[0], vch.size() * sizeof(vch[0])); } template CDataStream &operator<<(const T &obj) { // Serialize to this stream ::Serialize(*this, obj); return (*this); } template CDataStream &operator>>(T &obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } void GetAndClear(CSerializeData &d) { d.insert(d.end(), begin(), end()); clear(); } /** * XOR the contents of this stream with a certain key. * * @param[in] key The key used to XOR the data in this stream. */ void Xor(const std::vector &key) { if (key.size() == 0) { return; } for (size_type i = 0, j = 0; i != size(); i++) { vch[i] ^= key[j++]; // This potentially acts on very many bytes of data, so it's // important that we calculate `j`, i.e. the `key` index in this way // instead of doing a %, which would effectively be a division for // each byte Xor'd -- much slower than need be. if (j == key.size()) j = 0; } } }; template class BitStreamReader { private: IStream &m_istream; /// Buffered byte read in from the input stream. A new byte is read into the /// buffer when m_offset reaches 8. uint8_t m_buffer{0}; /// Number of high order bits in m_buffer already returned by previous /// Read() calls. 
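// Standalone sketch of the rolling-key XOR used by CDataStream::Xor() above:
// the key index j is carried across iterations and reset when it wraps,
// instead of computing i % key.size() (an integer division) for every byte.
// XorBuffer is an illustrative name, not part of streams.h.
#include <cassert>
#include <cstdint>
#include <vector>

static void XorBuffer(std::vector<uint8_t> &data,
                      const std::vector<uint8_t> &key) {
    if (key.empty()) return;
    for (size_t i = 0, j = 0; i != data.size(); i++) {
        data[i] ^= key[j++];
        if (j == key.size()) j = 0;
    }
}

int main() {
    std::vector<uint8_t> buf{0x00, 0xff, 0x10, 0x20};
    const std::vector<uint8_t> key{0xaa, 0x55};
    XorBuffer(buf, key); // applies aa 55 aa 55
    assert((buf == std::vector<uint8_t>{0xaa, 0xaa, 0xba, 0x75}));
    XorBuffer(buf, key); // XOR with the same key is its own inverse
    assert((buf == std::vector<uint8_t>{0x00, 0xff, 0x10, 0x20}));
    return 0;
}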
The next bit to be returned is at this offset from the /// most significant bit position. int m_offset{8}; public: explicit BitStreamReader(IStream &istream) : m_istream(istream) {} /** * Read the specified number of bits from the stream. The data is returned * in the nbits least significant bits of a 64-bit uint. */ uint64_t Read(int nbits) { if (nbits < 0 || nbits > 64) { throw std::out_of_range("nbits must be between 0 and 64"); } uint64_t data = 0; while (nbits > 0) { if (m_offset == 8) { m_istream >> m_buffer; m_offset = 0; } int bits = std::min(8 - m_offset, nbits); data <<= bits; data |= static_cast(m_buffer << m_offset) >> (8 - bits); m_offset += bits; nbits -= bits; } return data; } }; template class BitStreamWriter { private: OStream &m_ostream; /// Buffered byte waiting to be written to the output stream. The byte is /// written buffer when m_offset reaches 8 or Flush() is called. uint8_t m_buffer{0}; /// Number of high order bits in m_buffer already written by previous /// Write() calls and not yet flushed to the stream. The next bit to be /// written to is at this offset from the most significant bit position. int m_offset{0}; public: explicit BitStreamWriter(OStream &ostream) : m_ostream(ostream) {} ~BitStreamWriter() { Flush(); } /** * Write the nbits least significant bits of a 64-bit int to the output * stream. Data is buffered until it completes an octet. */ void Write(uint64_t data, int nbits) { if (nbits < 0 || nbits > 64) { throw std::out_of_range("nbits must be between 0 and 64"); } while (nbits > 0) { int bits = std::min(8 - m_offset, nbits); m_buffer |= (data << (64 - nbits)) >> (64 - 8 + m_offset); m_offset += bits; nbits -= bits; if (m_offset == 8) { Flush(); } } } /** * Flush any unwritten bits to the output stream, padding with 0's to the * next byte boundary. */ void Flush() { if (m_offset == 0) { return; } m_ostream << m_buffer; m_buffer = 0; m_offset = 0; } }; /** * Non-refcounted RAII wrapper for FILE* * * Will automatically close the file when it goes out of scope if not null. If * you're returning the file pointer, return file.release(). If you need to * close the file early, use file.fclose() instead of fclose(file). */ class CAutoFile { private: // Disallow copies CAutoFile(const CAutoFile &); CAutoFile &operator=(const CAutoFile &); const int nType; const int nVersion; FILE *file; public: CAutoFile(FILE *filenew, int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) { file = filenew; } ~CAutoFile() { fclose(); } void fclose() { if (file) { ::fclose(file); file = nullptr; } } /** * Get wrapped FILE* with transfer of ownership. * @note This will invalidate the CAutoFile object, and makes it the * responsibility of the caller of this function to clean up the returned * FILE*. */ FILE *release() { FILE *ret = file; file = nullptr; return ret; } /** * Get wrapped FILE* without transfer of ownership. * @note Ownership of the FILE* will remain with this class. Use this only * if the scope of the CAutoFile outlives use of the passed pointer. */ FILE *Get() const { return file; } /** Return true if the wrapped FILE* is nullptr, false otherwise. */ bool IsNull() const { return (file == nullptr); } // // Stream subset // int GetType() const { return nType; } int GetVersion() const { return nVersion; } void read(char *pch, size_t nSize) { if (!file) throw std::ios_base::failure( "CAutoFile::read: file handle is nullptr"); if (fread(pch, 1, nSize, file) != nSize) throw std::ios_base::failure(feof(file) ? 
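// Round-trip sketch for BitStreamWriter/BitStreamReader above, using a
// CDataStream as the underlying byte stream (any stream with << and >> for
// uint8_t works). Assumes <streams.h>; SER_NETWORK / PROTOCOL_VERSION are
// the usual constants from the tree.
#include <streams.h>
#include <version.h>
#include <cassert>

void BitStreamRoundTrip() {
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    {
        BitStreamWriter<CDataStream> writer(stream);
        writer.Write(0x5, 3);  // 101
        writer.Write(0x1, 1);  // 1
        writer.Write(0xab, 8); // 10101011
        // ~BitStreamWriter() flushes, zero-padding to the next byte boundary.
    }
    BitStreamReader<CDataStream> reader(stream);
    assert(reader.Read(3) == 0x5);
    assert(reader.Read(1) == 0x1);
    assert(reader.Read(8) == 0xab);
}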
"CAutoFile::read: end of file" : "CAutoFile::read: fread failed"); } void ignore(size_t nSize) { if (!file) throw std::ios_base::failure( "CAutoFile::ignore: file handle is nullptr"); uint8_t data[4096]; while (nSize > 0) { size_t nNow = std::min(nSize, sizeof(data)); if (fread(data, 1, nNow, file) != nNow) throw std::ios_base::failure( feof(file) ? "CAutoFile::ignore: end of file" : "CAutoFile::read: fread failed"); nSize -= nNow; } } void write(const char *pch, size_t nSize) { if (!file) throw std::ios_base::failure( "CAutoFile::write: file handle is nullptr"); if (fwrite(pch, 1, nSize, file) != nSize) throw std::ios_base::failure("CAutoFile::write: write failed"); } template CAutoFile &operator<<(const T &obj) { // Serialize to this stream if (!file) throw std::ios_base::failure( "CAutoFile::operator<<: file handle is nullptr"); ::Serialize(*this, obj); return (*this); } template CAutoFile &operator>>(T &obj) { // Unserialize from this stream if (!file) throw std::ios_base::failure( "CAutoFile::operator>>: file handle is nullptr"); ::Unserialize(*this, obj); return (*this); } }; /** * Non-refcounted RAII wrapper around a FILE* that implements a ring buffer to * deserialize from. It guarantees the ability to rewind a given number of * bytes. * * Will automatically close the file when it goes out of scope if not null. If * you need to close the file early, use file.fclose() instead of fclose(file). */ class CBufferedFile { private: // Disallow copies CBufferedFile(const CBufferedFile &); CBufferedFile &operator=(const CBufferedFile &); const int nType; const int nVersion; // source file FILE *src; // how many bytes have been read from source uint64_t nSrcPos; // how many bytes have been read from this uint64_t nReadPos; // up to which position we're allowed to read uint64_t nReadLimit; // how many bytes we guarantee to rewind uint64_t nRewind; // the buffer std::vector vchBuf; protected: // read data from the source to fill the buffer bool Fill() { unsigned int pos = nSrcPos % vchBuf.size(); unsigned int readNow = vchBuf.size() - pos; unsigned int nAvail = vchBuf.size() - (nSrcPos - nReadPos) - nRewind; if (nAvail < readNow) readNow = nAvail; if (readNow == 0) return false; size_t nBytes = fread((void *)&vchBuf[pos], 1, readNow, src); if (nBytes == 0) { throw std::ios_base::failure( feof(src) ? 
"CBufferedFile::Fill: end of file" : "CBufferedFile::Fill: fread failed"); } else { nSrcPos += nBytes; return true; } } public: CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit((uint64_t)(-1)), nRewind(nRewindIn), vchBuf(nBufSize, 0) { src = fileIn; } ~CBufferedFile() { fclose(); } int GetVersion() const { return nVersion; } int GetType() const { return nType; } void fclose() { if (src) { ::fclose(src); src = nullptr; } } // check whether we're at the end of the source file bool eof() const { return nReadPos == nSrcPos && feof(src); } // read a number of bytes void read(char *pch, size_t nSize) { if (nSize + nReadPos > nReadLimit) throw std::ios_base::failure("Read attempted past buffer limit"); if (nSize + nRewind > vchBuf.size()) throw std::ios_base::failure("Read larger than buffer size"); while (nSize > 0) { if (nReadPos == nSrcPos) Fill(); unsigned int pos = nReadPos % vchBuf.size(); size_t nNow = nSize; if (nNow + pos > vchBuf.size()) nNow = vchBuf.size() - pos; if (nNow + nReadPos > nSrcPos) nNow = nSrcPos - nReadPos; memcpy(pch, &vchBuf[pos], nNow); nReadPos += nNow; pch += nNow; nSize -= nNow; } } // return the current reading position uint64_t GetPos() const { return nReadPos; } // rewind to a given reading position bool SetPos(uint64_t nPos) { nReadPos = nPos; if (nReadPos + nRewind < nSrcPos) { nReadPos = nSrcPos - nRewind; return false; } else if (nReadPos > nSrcPos) { nReadPos = nSrcPos; return false; } else { return true; } } bool Seek(uint64_t nPos) { long nLongPos = nPos; if (nPos != (uint64_t)nLongPos) return false; if (fseek(src, nLongPos, SEEK_SET)) return false; nLongPos = ftell(src); nSrcPos = nLongPos; nReadPos = nLongPos; return true; } // Prevent reading beyond a certain position. No argument removes the limit. bool SetLimit(uint64_t nPos = (uint64_t)(-1)) { if (nPos < nReadPos) return false; nReadLimit = nPos; return true; } template CBufferedFile &operator>>(T &obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } // search for a given byte in the stream, and remain positioned on it void FindByte(char ch) { while (true) { if (nReadPos == nSrcPos) Fill(); if (vchBuf[nReadPos % vchBuf.size()] == ch) break; nReadPos++; } } }; #endif // BITCOIN_STREAMS_H diff --git a/src/sync.cpp b/src/sync.cpp index 8eb8ee660..5862b6870 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -1,204 +1,204 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "sync.h" +#include -#include "logging.h" -#include "utilstrencodings.h" +#include +#include #include #include #include #include #ifdef DEBUG_LOCKCONTENTION void PrintLockContention(const char *pszName, const char *pszFile, int nLine) { LogPrintf("LOCKCONTENTION: %s\n", pszName); LogPrintf("Locker: %s:%d\n", pszFile, nLine); } #endif /* DEBUG_LOCKCONTENTION */ #ifdef DEBUG_LOCKORDER // // Early deadlock detection. // Problem being solved: // Thread 1 locks A, then B, then C // Thread 2 locks D, then C, then A // --> may result in deadlock between the two threads, depending on when // they run. // Solution implemented here: // Keep track of pairs of locks: (A before B), (A before C), etc. // Complain if any thread tries to lock in a different order. 
// struct CLockLocation { CLockLocation(const char *pszName, const char *pszFile, int nLine, bool fTryIn) { mutexName = pszName; sourceFile = pszFile; sourceLine = nLine; fTry = fTryIn; } std::string ToString() const { return mutexName + " " + sourceFile + ":" + itostr(sourceLine) + (fTry ? " (TRY)" : ""); } private: bool fTry; std::string mutexName; std::string sourceFile; int sourceLine; }; typedef std::vector> LockStack; typedef std::map, LockStack> LockOrders; typedef std::set> InvLockOrders; struct LockData { // Very ugly hack: as the global constructs and destructors run single // threaded, we use this boolean to know whether LockData still exists, // as DeleteLock can get called by global CCriticalSection destructors // after LockData disappears. bool available; LockData() : available(true) {} ~LockData() { available = false; } LockOrders lockorders; InvLockOrders invlockorders; std::mutex dd_mutex; } static lockdata; static thread_local std::unique_ptr lockstack; static void potential_deadlock_detected(const std::pair &mismatch, const LockStack &s1, const LockStack &s2) { LogPrintf("POTENTIAL DEADLOCK DETECTED\n"); LogPrintf("Previous lock order was:\n"); for (const std::pair &i : s2) { if (i.first == mismatch.first) { LogPrintf(" (1)"); } if (i.first == mismatch.second) { LogPrintf(" (2)"); } LogPrintf(" %s\n", i.second.ToString()); } LogPrintf("Current lock order is:\n"); for (const std::pair &i : s1) { if (i.first == mismatch.first) { LogPrintf(" (1)"); } if (i.first == mismatch.second) { LogPrintf(" (2)"); } LogPrintf(" %s\n", i.second.ToString()); } if (g_debug_lockorder_abort) { fprintf(stderr, "Assertion failed: detected inconsistent lock order at %s:%i, " "details in debug log.\n", __FILE__, __LINE__); abort(); } throw std::logic_error("potential deadlock detected"); } static void push_lock(void *c, const CLockLocation &locklocation) { if (!lockstack) { lockstack.reset(new LockStack); } std::lock_guard lock(lockdata.dd_mutex); lockstack->push_back(std::make_pair(c, locklocation)); for (const std::pair &i : (*lockstack)) { if (i.first == c) break; std::pair p1 = std::make_pair(i.first, c); if (lockdata.lockorders.count(p1)) continue; lockdata.lockorders[p1] = (*lockstack); std::pair p2 = std::make_pair(c, i.first); lockdata.invlockorders.insert(p2); if (lockdata.lockorders.count(p2)) potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]); } } static void pop_lock() { (*lockstack).pop_back(); } void EnterCritical(const char *pszName, const char *pszFile, int nLine, void *cs, bool fTry) { push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry)); } void LeaveCritical() { pop_lock(); } std::string LocksHeld() { std::string result; for (const std::pair &i : *lockstack) { result += i.second.ToString() + std::string("\n"); } return result; } void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs) { for (const std::pair &i : *lockstack) { if (i.first == cs) return; } fprintf(stderr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld().c_str()); abort(); } void AssertLockNotHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs) { for (const std::pair &i : *lockstack) { if (i.first == cs) { fprintf(stderr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld().c_str()); abort(); } } } void DeleteLock(void *cs) { if (!lockdata.available) { // We're already shutting down. 
return; } std::lock_guard lock(lockdata.dd_mutex); std::pair item = std::make_pair(cs, nullptr); LockOrders::iterator it = lockdata.lockorders.lower_bound(item); while (it != lockdata.lockorders.end() && it->first.first == cs) { std::pair invitem = std::make_pair(it->first.second, it->first.first); lockdata.invlockorders.erase(invitem); lockdata.lockorders.erase(it++); } InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item); while (invit != lockdata.invlockorders.end() && invit->first == cs) { std::pair invinvitem = std::make_pair(invit->second, invit->first); lockdata.lockorders.erase(invinvitem); lockdata.invlockorders.erase(invit++); } } bool g_debug_lockorder_abort = true; #endif /* DEBUG_LOCKORDER */ diff --git a/src/sync.h b/src/sync.h index eb2e498ef..2b97011f4 100644 --- a/src/sync.h +++ b/src/sync.h @@ -1,280 +1,280 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_SYNC_H #define BITCOIN_SYNC_H -#include "threadsafety.h" +#include #include #include #include ///////////////////////////////////////////////// // // // THE SIMPLE DEFINITION, EXCLUDING DEBUG CODE // // // ///////////////////////////////////////////////// /* CCriticalSection mutex; std::recursive_mutex mutex; LOCK(mutex); std::unique_lock criticalblock(mutex); LOCK2(mutex1, mutex2); std::unique_lock criticalblock1(mutex1); std::unique_lock criticalblock2(mutex2); TRY_LOCK(mutex, name); std::unique_lock name(mutex, std::try_to_lock_t); ENTER_CRITICAL_SECTION(mutex); // no RAII mutex.lock(); LEAVE_CRITICAL_SECTION(mutex); // no RAII mutex.unlock(); */ /////////////////////////////// // // // THE ACTUAL IMPLEMENTATION // // // /////////////////////////////// #ifdef DEBUG_LOCKORDER void EnterCritical(const char *pszName, const char *pszFile, int nLine, void *cs, bool fTry = false); void LeaveCritical(); std::string LocksHeld(); void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs) ASSERT_EXCLUSIVE_LOCK(cs); void AssertLockNotHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs); void DeleteLock(void *cs); /** * Call abort() if a potential lock order deadlock bug is detected, instead of * just logging information and throwing a logic_error. Defaults to true, and * set to false in DEBUG_LOCKORDER unit tests. */ extern bool g_debug_lockorder_abort; #else static inline void EnterCritical(const char *pszName, const char *pszFile, int nLine, void *cs, bool fTry = false) {} static inline void LeaveCritical() {} static inline void AssertLockHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs) ASSERT_EXCLUSIVE_LOCK(cs) {} static inline void AssertLockNotHeldInternal(const char *pszName, const char *pszFile, int nLine, void *cs) {} static inline void DeleteLock(void *cs) {} #endif #define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) #define AssertLockNotHeld(cs) \ AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs) /** * Template mixin that adds -Wthread-safety locking annotations and lock order * checking to a subset of the mutex API. 
*/ template class LOCKABLE AnnotatedMixin : public PARENT { public: ~AnnotatedMixin() { DeleteLock((void *)this); } void lock() EXCLUSIVE_LOCK_FUNCTION() { PARENT::lock(); } void unlock() UNLOCK_FUNCTION() { PARENT::unlock(); } bool try_lock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { return PARENT::try_lock(); } using UniqueLock = std::unique_lock; }; /** * Wrapped mutex: supports recursive locking, but no waiting * TODO: We should move away from using the recursive lock by default. */ typedef AnnotatedMixin CCriticalSection; /** Wrapped mutex: supports waiting but not recursive locking */ typedef AnnotatedMixin CWaitableCriticalSection; /** * Just a typedef for std::condition_variable, can be wrapped later if desired. */ typedef std::condition_variable CConditionVariable; #ifdef DEBUG_LOCKCONTENTION void PrintLockContention(const char *pszName, const char *pszFile, int nLine); #endif /** Wrapper around std::unique_lock style lock for Mutex. */ template class SCOPED_LOCKABLE CCriticalBlock : public Base { private: void Enter(const char *pszName, const char *pszFile, int nLine) { EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex())); #ifdef DEBUG_LOCKCONTENTION if (!Base::try_lock()) { PrintLockContention(pszName, pszFile, nLine); #endif Base::lock(); #ifdef DEBUG_LOCKCONTENTION } #endif } bool TryEnter(const char *pszName, const char *pszFile, int nLine) { EnterCritical(pszName, pszFile, nLine, (void *)(Base::mutex()), true); Base::try_lock(); if (!Base::owns_lock()) LeaveCritical(); return Base::owns_lock(); } public: CCriticalBlock(Mutex &mutexIn, const char *pszName, const char *pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn) : Base(mutexIn, std::defer_lock) { if (fTry) TryEnter(pszName, pszFile, nLine); else Enter(pszName, pszFile, nLine); } CCriticalBlock(Mutex *pmutexIn, const char *pszName, const char *pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn) { if (!pmutexIn) return; *static_cast(this) = Base(*pmutexIn, std::defer_lock); if (fTry) TryEnter(pszName, pszFile, nLine); else Enter(pszName, pszFile, nLine); } ~CCriticalBlock() UNLOCK_FUNCTION() { if (Base::owns_lock()) LeaveCritical(); } operator bool() { return Base::owns_lock(); } }; template using DebugLock = CCriticalBlock::type>::type>; #define PASTE(x, y) x##y #define PASTE2(x, y) PASTE(x, y) #define LOCK(cs) \ DebugLock PASTE2(criticalblock, \ __COUNTER__)(cs, #cs, __FILE__, __LINE__) #define LOCK2(cs1, cs2) \ DebugLock criticalblock1(cs1, #cs1, __FILE__, __LINE__); \ DebugLock criticalblock2(cs2, #cs2, __FILE__, __LINE__); #define TRY_LOCK(cs, name) \ DebugLock name(cs, #cs, __FILE__, __LINE__, true) #define WAIT_LOCK(cs, name) \ DebugLock name(cs, #cs, __FILE__, __LINE__) #define ENTER_CRITICAL_SECTION(cs) \ { \ EnterCritical(#cs, __FILE__, __LINE__, (void *)(&cs)); \ (cs).lock(); \ } #define LEAVE_CRITICAL_SECTION(cs) \ { \ (cs).unlock(); \ LeaveCritical(); \ } class CSemaphore { private: std::condition_variable condition; std::mutex mutex; int value; public: explicit CSemaphore(int init) : value(init) {} void wait() { std::unique_lock lock(mutex); condition.wait(lock, [&]() { return value >= 1; }); value--; } bool try_wait() { std::lock_guard lock(mutex); if (value < 1) { return false; } value--; return true; } void post() { { std::lock_guard lock(mutex); value++; } condition.notify_one(); } }; /** RAII-style semaphore lock */ class CSemaphoreGrant { private: CSemaphore *sem; bool fHaveGrant; public: void Acquire() { if (fHaveGrant) return; sem->wait(); fHaveGrant = 
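// Usage sketch (not from the codebase) for the macros above, assuming
// <sync.h>. With DEBUG_LOCKORDER defined, every LOCK() feeds the lock-order
// checker in sync.cpp; without it the macros reduce to plain RAII locking.
#include <sync.h>
#include <vector>

static CCriticalSection cs_counters;
static std::vector<int> vCounters; // GUARDED_BY(cs_counters) in annotated code

static void AppendLockedInternal(int value) {
    AssertLockHeld(cs_counters); // caller must already hold the lock
    vCounters.push_back(value);
}

void AddCounter(int value) {
    LOCK(cs_counters); // RAII: released at end of scope
    AppendLockedInternal(value);
}

size_t CountersSnapshotSize() {
    TRY_LOCK(cs_counters, lock); // non-blocking attempt
    if (!lock) {
        return 0; // lock busy, caller retries later
    }
    return vCounters.size();
}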
true; } void Release() { if (!fHaveGrant) return; sem->post(); fHaveGrant = false; } bool TryAcquire() { if (!fHaveGrant && sem->try_wait()) fHaveGrant = true; return fHaveGrant; } void MoveTo(CSemaphoreGrant &grant) { grant.Release(); grant.sem = sem; grant.fHaveGrant = fHaveGrant; fHaveGrant = false; } CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {} explicit CSemaphoreGrant(CSemaphore &sema, bool fTry = false) : sem(&sema), fHaveGrant(false) { if (fTry) TryAcquire(); else Acquire(); } ~CSemaphoreGrant() { Release(); } operator bool() const { return fHaveGrant; } }; #endif // BITCOIN_SYNC_H diff --git a/src/threadinterrupt.cpp b/src/threadinterrupt.cpp index 00aaa5d51..a1ea97c50 100644 --- a/src/threadinterrupt.cpp +++ b/src/threadinterrupt.cpp @@ -1,41 +1,41 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "threadinterrupt.h" +#include -#include "sync.h" +#include CThreadInterrupt::operator bool() const { return flag.load(std::memory_order_acquire); } void CThreadInterrupt::reset() { flag.store(false, std::memory_order_release); } void CThreadInterrupt::operator()() { { LOCK(mut); flag.store(true, std::memory_order_release); } cond.notify_all(); } bool CThreadInterrupt::sleep_for(std::chrono::milliseconds rel_time) { WAIT_LOCK(mut, lock); return !cond.wait_for(lock, rel_time, [this]() { return flag.load(std::memory_order_acquire); }); } bool CThreadInterrupt::sleep_for(std::chrono::seconds rel_time) { return sleep_for( std::chrono::duration_cast(rel_time)); } bool CThreadInterrupt::sleep_for(std::chrono::minutes rel_time) { return sleep_for( std::chrono::duration_cast(rel_time)); } diff --git a/src/timedata.cpp b/src/timedata.cpp index e35b0f3f7..8320a3506 100644 --- a/src/timedata.cpp +++ b/src/timedata.cpp @@ -1,119 +1,119 @@ // Copyright (c) 2014-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" +#include #endif -#include "timedata.h" +#include -#include "netaddress.h" -#include "sync.h" -#include "ui_interface.h" -#include "util.h" -#include "utilstrencodings.h" -#include "warnings.h" +#include +#include +#include +#include +#include +#include static CCriticalSection cs_nTimeOffset; static int64_t nTimeOffset = 0; /** * "Never go to sea with two chronometers; take one or three." * Our three time sources are: * - System clock * - Median of other nodes clocks * - The user (asking the user to fix the system clock if the first two * disagree) */ int64_t GetTimeOffset() { LOCK(cs_nTimeOffset); return nTimeOffset; } int64_t GetAdjustedTime() { return GetTime() + GetTimeOffset(); } static int64_t abs64(int64_t n) { return (n >= 0 ? 
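// Usage sketch for CThreadInterrupt above (assumes <threadinterrupt.h>): a
// worker loop that sleeps in one-second slices and wakes immediately when
// another thread calls the interrupt. Names are illustrative.
#include <threadinterrupt.h>
#include <chrono>
#include <thread>

static CThreadInterrupt g_interrupt;

void WorkerLoop() {
    // sleep_for() returns false once the interrupt flag is set, so the loop
    // exits promptly instead of finishing the full sleep.
    while (g_interrupt.sleep_for(std::chrono::seconds(1))) {
        // ... one unit of periodic work ...
    }
}

void StopWorker() {
    g_interrupt(); // sets the flag and notifies sleepers
}

int main() {
    std::thread t(WorkerLoop);
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    StopWorker();
    t.join();
    return 0;
}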
n : -n); } #define BITCOIN_TIMEDATA_MAX_SAMPLES 200 void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample) { LOCK(cs_nTimeOffset); // Ignore duplicates static std::set setKnown; if (setKnown.size() == BITCOIN_TIMEDATA_MAX_SAMPLES) return; if (!setKnown.insert(ip).second) return; // Add data static CMedianFilter vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0); vTimeOffsets.input(nOffsetSample); LogPrint(BCLog::NET, "added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample / 60); // There is a known issue here (see issue #4521): // // - The structure vTimeOffsets contains up to 200 elements, after which any // new element added to it will not increase its size, replacing the oldest // element. // // - The condition to update nTimeOffset includes checking whether the // number of elements in vTimeOffsets is odd, which will never happen after // there are 200 elements. // // But in this case the 'bug' is protective against some attacks, and may // actually explain why we've never seen attacks which manipulate the clock // offset. // // So we should hold off on fixing this and clean it up as part of a timing // cleanup that strengthens it in a number of other ways. // if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1) { int64_t nMedian = vTimeOffsets.median(); std::vector vSorted = vTimeOffsets.sorted(); // Only let other nodes change our time by so much if (abs64(nMedian) <= std::max(0, gArgs.GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT))) { nTimeOffset = nMedian; } else { nTimeOffset = 0; static bool fDone; if (!fDone) { // If nobody has a time different than ours but within 5 minutes // of ours, give a warning bool fMatch = false; for (int64_t nOffset : vSorted) { if (nOffset != 0 && abs64(nOffset) < 5 * 60) fMatch = true; } if (!fMatch) { fDone = true; std::string strMessage = strprintf(_("Please check that your computer's date " "and time are correct! If your clock is " "wrong, %s will not work properly."), _(PACKAGE_NAME)); SetMiscWarning(strMessage); uiInterface.ThreadSafeMessageBox( strMessage, "", CClientUIInterface::MSG_WARNING); } } } if (LogAcceptCategory(BCLog::NET)) { for (int64_t n : vSorted) { LogPrint(BCLog::NET, "%+d ", n); } LogPrint(BCLog::NET, "| "); LogPrint(BCLog::NET, "nTimeOffset = %+d (%+d minutes)\n", nTimeOffset, nTimeOffset / 60); } } } diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 1116082d6..6d456aed2 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -1,813 +1,814 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
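// Standalone sketch of the clamping rule in AddTimeData() above: the network
// time offset follows the median of peer offsets, but only while that median
// stays within -maxtimeadjustment (DEFAULT_MAX_TIME_ADJUSTMENT, assumed here
// to be the tree's usual 70 * 60 seconds); otherwise the offset snaps back to
// 0 and the node trusts its own clock. Names are illustrative; the real code
// uses CMedianFilter and only updates on odd sample counts.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

static int64_t ClampedMedianOffset(std::vector<int64_t> offsets,
                                   int64_t max_adjustment) {
    if (offsets.empty()) return 0;
    std::sort(offsets.begin(), offsets.end());
    int64_t median = offsets[offsets.size() / 2];
    int64_t magnitude = median >= 0 ? median : -median;
    return (magnitude <= max_adjustment) ? median : 0;
}

int main() {
    // Peers report clocks ~2 minutes ahead of ours: the median is accepted.
    std::printf("%lld\n", (long long)ClampedMedianOffset(
                              {118, 125, 121, 130, 119}, 70 * 60));
    // Peers claim we are hours off: ignored, the offset stays 0.
    std::printf("%lld\n", (long long)ClampedMedianOffset(
                              {7200, 7300, 7250}, 70 * 60));
    return 0;
}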
-#include "torcontrol.h" -#include "crypto/hmac_sha256.h" -#include "net.h" -#include "netbase.h" -#include "util.h" -#include "utilstrencodings.h" +#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include #include #include #include #include #include #include #include #include +#include +#include +#include +#include + /** Default control port */ const std::string DEFAULT_TOR_CONTROL = "127.0.0.1:9051"; /** Tor cookie size (from control-spec.txt) */ static const int TOR_COOKIE_SIZE = 32; /** Size of client/server nonce for SAFECOOKIE */ static const int TOR_NONCE_SIZE = 32; /** For computing serverHash in SAFECOOKIE */ static const std::string TOR_SAFE_SERVERKEY = "Tor safe cookie authentication server-to-controller hash"; /** For computing clientHash in SAFECOOKIE */ static const std::string TOR_SAFE_CLIENTKEY = "Tor safe cookie authentication controller-to-server hash"; /** Exponential backoff configuration - initial timeout in seconds */ static const float RECONNECT_TIMEOUT_START = 1.0; /** Exponential backoff configuration - growth factor */ static const float RECONNECT_TIMEOUT_EXP = 1.5; /** * Maximum length for lines received on TorControlConnection. * tor-control-spec.txt mentions that there is explicitly no limit defined to * line length, this is belt-and-suspenders sanity limit to prevent memory * exhaustion. */ static const int MAX_LINE_LENGTH = 100000; /****** Low-level TorControlConnection ********/ /** Reply from Tor, can be single or multi-line */ class TorControlReply { public: TorControlReply() { Clear(); } int code; std::vector lines; void Clear() { code = 0; lines.clear(); } }; /** * Low-level handling for Tor control connection. * Speaks the SMTP-like protocol as defined in torspec/control-spec.txt */ class TorControlConnection { public: typedef std::function ConnectionCB; typedef std::function ReplyHandlerCB; /** Create a new TorControlConnection. */ explicit TorControlConnection(struct event_base *base); ~TorControlConnection(); /** * Connect to a Tor control port. * target is address of the form host:port. * connected is the handler that is called when connection is successfully * established. * disconnected is a handler that is called when the connection is broken. * Return true on success. */ bool Connect(const std::string &target, const ConnectionCB &connected, const ConnectionCB &disconnected); /** * Disconnect from Tor control port. */ bool Disconnect(); /** * Send a command, register a handler for the reply. * A trailing CRLF is automatically added. * Return true on success. 
*/ bool Command(const std::string &cmd, const ReplyHandlerCB &reply_handler); /** Response handlers for async replies */ boost::signals2::signal async_handler; private: /** Callback when ready for use */ std::function connected; /** Callback when connection lost */ std::function disconnected; /** Libevent event base */ struct event_base *base; /** Connection to control socket */ struct bufferevent *b_conn; /** Message being received */ TorControlReply message; /** Response handlers */ std::deque reply_handlers; /** Libevent handlers: internal */ static void readcb(struct bufferevent *bev, void *ctx); static void eventcb(struct bufferevent *bev, short what, void *ctx); }; TorControlConnection::TorControlConnection(struct event_base *_base) : base(_base), b_conn(nullptr) {} TorControlConnection::~TorControlConnection() { if (b_conn) { bufferevent_free(b_conn); } } void TorControlConnection::readcb(struct bufferevent *bev, void *ctx) { TorControlConnection *self = static_cast(ctx); struct evbuffer *input = bufferevent_get_input(bev); size_t n_read_out = 0; char *line; assert(input); // If there is not a whole line to read, evbuffer_readln returns nullptr while ((line = evbuffer_readln(input, &n_read_out, EVBUFFER_EOL_CRLF)) != nullptr) { std::string s(line, n_read_out); free(line); // Short line if (s.size() < 4) { continue; } // (-|+| ) self->message.code = atoi(s.substr(0, 3)); self->message.lines.push_back(s.substr(4)); // '-','+' or ' ' char ch = s[3]; if (ch == ' ') { // Final line, dispatch reply and clean up if (self->message.code >= 600) { // Dispatch async notifications to async handler. // Synchronous and asynchronous messages are never interleaved self->async_handler(*self, self->message); } else { if (!self->reply_handlers.empty()) { // Invoke reply handler with message self->reply_handlers.front()(*self, self->message); self->reply_handlers.pop_front(); } else { LogPrint(BCLog::TOR, "tor: Received unexpected sync reply %i\n", self->message.code); } } self->message.Clear(); } } // Check for size of buffer - protect against memory exhaustion with very // long lines. Do this after evbuffer_readln to make sure all full lines // have been removed from the buffer. Everything left is an incomplete line. 
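// Standalone sketch of the line format handled by readcb() above: a Tor
// control reply line is "<3-digit code><'-'|'+'|' '><payload>", where ' '
// marks the final line of a reply and codes >= 600 are asynchronous events.
// ParseControlLine / ControlLine are illustrative names.
#include <cassert>
#include <string>

struct ControlLine {
    int code = 0;
    char sep = ' '; // '-' = more lines follow, '+' = data, ' ' = final line
    std::string payload;
    bool valid = false;
};

static ControlLine ParseControlLine(const std::string &s) {
    ControlLine out;
    if (s.size() < 4) return out; // too short; readcb skips these as well
    out.code = std::stoi(s.substr(0, 3));
    out.sep = s[3];
    out.payload = s.substr(4);
    out.valid = true;
    return out;
}

int main() {
    ControlLine l = ParseControlLine("250-AUTH METHODS=COOKIE,SAFECOOKIE");
    assert(l.valid && l.code == 250 && l.sep == '-');
    assert(l.payload == "AUTH METHODS=COOKIE,SAFECOOKIE");

    ControlLine evt = ParseControlLine("650 CIRC 1000 EXTENDED");
    assert(evt.code >= 600 && evt.sep == ' '); // async event, final line
    return 0;
}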
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) { LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n"); self->Disconnect(); } } void TorControlConnection::eventcb(struct bufferevent *bev, short what, void *ctx) { TorControlConnection *self = static_cast(ctx); if (what & BEV_EVENT_CONNECTED) { LogPrint(BCLog::TOR, "tor: Successfully connected!\n"); self->connected(*self); } else if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) { if (what & BEV_EVENT_ERROR) { LogPrint(BCLog::TOR, "tor: Error connecting to Tor control socket\n"); } else { LogPrint(BCLog::TOR, "tor: End of stream\n"); } self->Disconnect(); self->disconnected(*self); } } bool TorControlConnection::Connect(const std::string &target, const ConnectionCB &_connected, const ConnectionCB &_disconnected) { if (b_conn) { Disconnect(); } // Parse target address:port struct sockaddr_storage connect_to_addr; int connect_to_addrlen = sizeof(connect_to_addr); if (evutil_parse_sockaddr_port(target.c_str(), (struct sockaddr *)&connect_to_addr, &connect_to_addrlen) < 0) { LogPrintf("tor: Error parsing socket address %s\n", target); return false; } // Create a new socket, set up callbacks and enable notification bits b_conn = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE); if (!b_conn) { return false; } bufferevent_setcb(b_conn, TorControlConnection::readcb, nullptr, TorControlConnection::eventcb, this); bufferevent_enable(b_conn, EV_READ | EV_WRITE); this->connected = _connected; this->disconnected = _disconnected; // Finally, connect to target if (bufferevent_socket_connect(b_conn, (struct sockaddr *)&connect_to_addr, connect_to_addrlen) < 0) { LogPrintf("tor: Error connecting to address %s\n", target); return false; } return true; } bool TorControlConnection::Disconnect() { if (b_conn) { bufferevent_free(b_conn); } b_conn = nullptr; return true; } bool TorControlConnection::Command(const std::string &cmd, const ReplyHandlerCB &reply_handler) { if (!b_conn) { return false; } struct evbuffer *buf = bufferevent_get_output(b_conn); if (!buf) { return false; } evbuffer_add(buf, cmd.data(), cmd.size()); evbuffer_add(buf, "\r\n", 2); reply_handlers.push_back(reply_handler); return true; } /****** General parsing utilities ********/ /* Split reply line in the form 'AUTH METHODS=...' into a type * 'AUTH' and arguments 'METHODS=...'. */ static std::pair SplitTorReplyLine(const std::string &s) { size_t ptr = 0; std::string type; while (ptr < s.size() && s[ptr] != ' ') { type.push_back(s[ptr]); ++ptr; } if (ptr < s.size()) { // skip ' ' ++ptr; } return make_pair(type, s.substr(ptr)); } /** * Parse reply arguments in the form 'METHODS=COOKIE,SAFECOOKIE * COOKIEFILE=".../control_auth_cookie"'. */ static std::map ParseTorReplyMapping(const std::string &s) { std::map mapping; size_t ptr = 0; while (ptr < s.size()) { std::string key, value; while (ptr < s.size() && s[ptr] != '=') { key.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) { return std::map(); } // skip '=' ++ptr; // Quoted string if (ptr < s.size() && s[ptr] == '"') { // skip '=' ++ptr; bool escape_next = false; while (ptr < s.size() && (!escape_next && s[ptr] != '"')) { escape_next = (s[ptr] == '\\'); value.push_back(s[ptr]); ++ptr; } // unexpected end of line if (ptr == s.size()) { return std::map(); } // skip closing '"' ++ptr; /* TODO: unescape value - according to the spec this depends on the * context, some strings use C-LogPrintf style escape codes, some * don't. So may be better handled at the call site. */ } else { // Unquoted value. 
Note that values can contain '=' at will, just no // spaces while (ptr < s.size() && s[ptr] != ' ') { value.push_back(s[ptr]); ++ptr; } } if (ptr < s.size() && s[ptr] == ' ') { // skip ' ' after key=value ++ptr; } mapping[key] = value; } return mapping; } /** * Read full contents of a file and return them in a std::string. * Returns a pair . * If an error occurred, status will be false, otherwise status will be true and * the data will be returned in string. * * @param maxsize Puts a maximum size limit on the file that is read. If the * file is larger than this, truncated data * (with len > maxsize) will be returned. */ static std::pair ReadBinaryFile(const fs::path &filename, size_t maxsize = std::numeric_limits::max()) { FILE *f = fsbridge::fopen(filename, "rb"); if (f == nullptr) { return std::make_pair(false, ""); } std::string retval; char buffer[128]; size_t n; while ((n = fread(buffer, 1, sizeof(buffer), f)) > 0) { retval.append(buffer, buffer + n); if (retval.size() > maxsize) { break; } } fclose(f); return std::make_pair(true, retval); } /** * Write contents of std::string to a file. * @return true on success. */ static bool WriteBinaryFile(const fs::path &filename, const std::string &data) { FILE *f = fsbridge::fopen(filename, "wb"); if (f == nullptr) { return false; } if (fwrite(data.data(), 1, data.size(), f) != data.size()) { fclose(f); return false; } fclose(f); return true; } /****** Bitcoin specific TorController implementation ********/ /** * Controller that connects to Tor control socket, authenticate, then create * and maintain a ephemeral hidden service. */ class TorController { public: TorController(struct event_base *base, const std::string &target); ~TorController(); /** Get name fo file to store private key in */ fs::path GetPrivateKeyFile(); /** Reconnect, after getting disconnected */ void Reconnect(); private: struct event_base *base; std::string target; TorControlConnection conn; std::string private_key; std::string service_id; bool reconnect; struct event *reconnect_ev; float reconnect_timeout; CService service; /** Cookie for SAFECOOKIE auth */ std::vector cookie; /** ClientNonce for SAFECOOKIE auth */ std::vector clientNonce; /** Callback for ADD_ONION result */ void add_onion_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHENTICATE result */ void auth_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for AUTHCHALLENGE result */ void authchallenge_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback for PROTOCOLINFO result */ void protocolinfo_cb(TorControlConnection &conn, const TorControlReply &reply); /** Callback after successful connection */ void connected_cb(TorControlConnection &conn); /** Callback after connection lost or failed connection attempt */ void disconnected_cb(TorControlConnection &conn); /** Callback for reconnect timer */ static void reconnect_cb(evutil_socket_t fd, short what, void *arg); }; TorController::TorController(struct event_base *_base, const std::string &_target) : base(_base), target(_target), conn(base), reconnect(true), reconnect_ev(0), reconnect_timeout(RECONNECT_TIMEOUT_START) { reconnect_ev = event_new(base, -1, 0, reconnect_cb, this); if (!reconnect_ev) { LogPrintf( "tor: Failed to create event for reconnection: out of memory?\n"); } // Start connection attempts immediately if (!conn.Connect(_target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) { LogPrintf("tor: Initiating 
connection to Tor control port %s failed\n", _target); } // Read service private key if cached std::pair pkf = ReadBinaryFile(GetPrivateKeyFile()); if (pkf.first) { LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile()); private_key = pkf.second; } } TorController::~TorController() { if (reconnect_ev) { event_free(reconnect_ev); reconnect_ev = nullptr; } if (service.IsValid()) { RemoveLocal(service); } } void TorController::add_onion_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: ADD_ONION successful\n"); for (const std::string &s : reply.lines) { std::map m = ParseTorReplyMapping(s); std::map::iterator i; if ((i = m.find("ServiceID")) != m.end()) { service_id = i->second; } if ((i = m.find("PrivateKey")) != m.end()) { private_key = i->second; } } service = LookupNumeric(std::string(service_id + ".onion").c_str(), GetListenPort()); LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString()); if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) { LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile()); } else { LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile()); } AddLocal(service, LOCAL_MANUAL); // ... onion requested - keep connection open } else if (reply.code == 510) { // 510 Unrecognized command LogPrintf("tor: Add onion failed with unrecognized command (You " "probably need to upgrade Tor)\n"); } else { LogPrintf("tor: Add onion failed; error code %d\n", reply.code); } } void TorController::auth_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: Authentication successful\n"); // Now that we know Tor is running setup the proxy for onion addresses // if -onion isn't set to something else. if (gArgs.GetArg("-onion", "") == "") { CService resolved(LookupNumeric("127.0.0.1", 9050)); proxyType addrOnion = proxyType(resolved, true); SetProxy(NET_ONION, addrOnion); SetLimited(NET_ONION, false); } // Finally - now create the service // No private key, generate one if (private_key.empty()) { // Explicitly request RSA1024 - see issue #9214 private_key = "NEW:RSA1024"; } // Request hidden service, redirect port. // Note that the 'virtual' port doesn't have to be the same as our // internal port, but this is just a convenient choice. TODO; refactor // the shutdown sequence some day. _conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, GetListenPort(), GetListenPort()), boost::bind(&TorController::add_onion_cb, this, _1, _2)); } else { LogPrintf("tor: Authentication failed\n"); } } /** Compute Tor SAFECOOKIE response. 
* * ServerHash is computed as: * HMAC-SHA256("Tor safe cookie authentication server-to-controller hash", * CookieString | ClientNonce | ServerNonce) * (with the HMAC key as its first argument) * * After a controller sends a successful AUTHCHALLENGE command, the * next command sent on the connection must be an AUTHENTICATE command, * and the only authentication string which that AUTHENTICATE command * will accept is: * * HMAC-SHA256("Tor safe cookie authentication controller-to-server hash", * CookieString | ClientNonce | ServerNonce) * */ static std::vector ComputeResponse(const std::string &key, const std::vector &cookie, const std::vector &clientNonce, const std::vector &serverNonce) { CHMAC_SHA256 computeHash((const uint8_t *)key.data(), key.size()); std::vector computedHash(CHMAC_SHA256::OUTPUT_SIZE, 0); computeHash.Write(cookie.data(), cookie.size()); computeHash.Write(clientNonce.data(), clientNonce.size()); computeHash.Write(serverNonce.data(), serverNonce.size()); computeHash.Finalize(computedHash.data()); return computedHash; } void TorController::authchallenge_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { LogPrint(BCLog::TOR, "tor: SAFECOOKIE authentication challenge successful\n"); std::pair l = SplitTorReplyLine(reply.lines[0]); if (l.first == "AUTHCHALLENGE") { std::map m = ParseTorReplyMapping(l.second); std::vector serverHash = ParseHex(m["SERVERHASH"]); std::vector serverNonce = ParseHex(m["SERVERNONCE"]); LogPrint(BCLog::TOR, "tor: AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); if (serverNonce.size() != 32) { LogPrintf( "tor: ServerNonce is not 32 bytes, as required by spec\n"); return; } std::vector computedServerHash = ComputeResponse( TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce); if (computedServerHash != serverHash) { LogPrintf("tor: ServerHash %s does not match expected " "ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash)); return; } std::vector computedClientHash = ComputeResponse( TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce); _conn.Command("AUTHENTICATE " + HexStr(computedClientHash), boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n"); } } else { LogPrintf("tor: SAFECOOKIE authentication challenge failed\n"); } } void TorController::protocolinfo_cb(TorControlConnection &_conn, const TorControlReply &reply) { if (reply.code == 250) { std::set methods; std::string cookiefile; /* * 250-AUTH METHODS=COOKIE,SAFECOOKIE * COOKIEFILE="/home/x/.tor/control_auth_cookie" * 250-AUTH METHODS=NULL * 250-AUTH METHODS=HASHEDPASSWORD */ for (const std::string &s : reply.lines) { std::pair l = SplitTorReplyLine(s); if (l.first == "AUTH") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("METHODS")) != m.end()) { boost::split(methods, i->second, boost::is_any_of(",")); } if ((i = m.find("COOKIEFILE")) != m.end()) { cookiefile = i->second; } } else if (l.first == "VERSION") { std::map m = ParseTorReplyMapping(l.second); std::map::iterator i; if ((i = m.find("Tor")) != m.end()) { LogPrint(BCLog::TOR, "tor: Connected to Tor version %s\n", i->second); } } } for (const std::string &s : methods) { LogPrint(BCLog::TOR, "tor: Supported authentication method: %s\n", s); } // Prefer NULL, otherwise SAFECOOKIE. 
If a password is provided, use // HASHEDPASSWORD /* Authentication: * cookie: hex-encoded ~/.tor/control_auth_cookie * password: "password" */ std::string torpassword = gArgs.GetArg("-torpassword", ""); if (!torpassword.empty()) { if (methods.count("HASHEDPASSWORD")) { LogPrint(BCLog::TOR, "tor: Using HASHEDPASSWORD authentication\n"); boost::replace_all(torpassword, "\"", "\\\""); _conn.Command( "AUTHENTICATE \"" + torpassword + "\"", boost::bind(&TorController::auth_cb, this, _1, _2)); } else { LogPrintf("tor: Password provided with -torpassword, but " "HASHEDPASSWORD authentication is not available\n"); } } else if (methods.count("NULL")) { LogPrint(BCLog::TOR, "tor: Using NULL authentication\n"); _conn.Command("AUTHENTICATE", boost::bind(&TorController::auth_cb, this, _1, _2)); } else if (methods.count("SAFECOOKIE")) { // Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, " "reading cookie authentication from %s\n", cookiefile); std::pair status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE); if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) { // _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), // boost::bind(&TorController::auth_cb, this, _1, _2)); cookie = std::vector(status_cookie.second.begin(), status_cookie.second.end()); clientNonce = std::vector(TOR_NONCE_SIZE, 0); GetRandBytes(&clientNonce[0], TOR_NONCE_SIZE); _conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), boost::bind(&TorController::authchallenge_cb, this, _1, _2)); } else { if (status_cookie.first) { LogPrintf("tor: Authentication cookie %s is not exactly %i " "bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE); } else { LogPrintf("tor: Authentication cookie %s could not be " "opened (check permissions)\n", cookiefile); } } } else if (methods.count("HASHEDPASSWORD")) { LogPrintf("tor: The only supported authentication mechanism left " "is password, but no password provided with " "-torpassword\n"); } else { LogPrintf("tor: No supported authentication method\n"); } } else { LogPrintf("tor: Requesting protocol info failed\n"); } } void TorController::connected_cb(TorControlConnection &_conn) { reconnect_timeout = RECONNECT_TIMEOUT_START; // First send a PROTOCOLINFO command to figure out what authentication is // expected if (!_conn.Command( "PROTOCOLINFO 1", boost::bind(&TorController::protocolinfo_cb, this, _1, _2))) { LogPrintf("tor: Error sending initial protocolinfo command\n"); } } void TorController::disconnected_cb(TorControlConnection &_conn) { // Stop advertising service when disconnected if (service.IsValid()) { RemoveLocal(service); } service = CService(); if (!reconnect) { return; } LogPrint(BCLog::TOR, "tor: Not connected to Tor control port %s, trying to reconnect\n", target); // Single-shot timer for reconnect. Use exponential backoff. struct timeval time = MillisToTimeval(int64_t(reconnect_timeout * 1000.0)); if (reconnect_ev) { event_add(reconnect_ev, &time); } reconnect_timeout *= RECONNECT_TIMEOUT_EXP; } void TorController::Reconnect() { /* Try to reconnect and reestablish if we get booted - for example, Tor may * be restarting. 
*/ if (!conn.Connect(target, boost::bind(&TorController::connected_cb, this, _1), boost::bind(&TorController::disconnected_cb, this, _1))) { LogPrintf( "tor: Re-initiating connection to Tor control port %s failed\n", target); } } fs::path TorController::GetPrivateKeyFile() { return GetDataDir() / "onion_private_key"; } void TorController::reconnect_cb(evutil_socket_t fd, short what, void *arg) { TorController *self = static_cast(arg); self->Reconnect(); } /****** Thread ********/ static struct event_base *gBase; static std::thread torControlThread; static void TorControlThread() { TorController ctrl(gBase, gArgs.GetArg("-torcontrol", DEFAULT_TOR_CONTROL)); event_base_dispatch(gBase); } void StartTorControl() { assert(!gBase); #ifdef WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif gBase = event_base_new(); if (!gBase) { LogPrintf("tor: Unable to create event_base\n"); return; } torControlThread = std::thread( std::bind(&TraceThread, "torcontrol", &TorControlThread)); } void InterruptTorControl() { if (gBase) { LogPrintf("tor: Thread interrupt\n"); event_base_loopbreak(gBase); } } void StopTorControl() { if (gBase) { torControlThread.join(); event_base_free(gBase); gBase = nullptr; } } diff --git a/src/torcontrol.h b/src/torcontrol.h index a47ab22d1..f4bfa24c0 100644 --- a/src/torcontrol.h +++ b/src/torcontrol.h @@ -1,20 +1,20 @@ // Copyright (c) 2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. /** * Functionality for communicating with Tor. */ #ifndef BITCOIN_TORCONTROL_H #define BITCOIN_TORCONTROL_H -#include "scheduler.h" +#include extern const std::string DEFAULT_TOR_CONTROL; static const bool DEFAULT_LISTEN_ONION = true; void StartTorControl(); void InterruptTorControl(); void StopTorControl(); #endif /* BITCOIN_TORCONTROL_H */ diff --git a/src/txdb.cpp b/src/txdb.cpp index e672e2a18..902622955 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -1,456 +1,456 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
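// ---------------------------------------------------------------------------
// Illustrative sketch of the reply-parsing helpers in torcontrol.cpp above.
// SplitTorReplyLine() and ParseTorReplyMapping() are file-static there, so
// this assumes they were exposed (e.g. via a header) for testing; the input
// mirrors the PROTOCOLINFO example in their doc comments, and the function
// name ExampleTorReplyParse is a hypothetical placeholder.
// ---------------------------------------------------------------------------
#include <map>
#include <string>
#include <utility>

static void ExampleTorReplyParse() {
    const std::string line =
        "AUTH METHODS=COOKIE,SAFECOOKIE "
        "COOKIEFILE=\"/home/x/.tor/control_auth_cookie\"";
    // The first word becomes the reply type, the remainder the argument
    // string.
    std::pair<std::string, std::string> l = SplitTorReplyLine(line);
    // l.first  == "AUTH"
    // l.second == "METHODS=COOKIE,SAFECOOKIE COOKIEFILE=\"...\""
    std::map<std::string, std::string> m = ParseTorReplyMapping(l.second);
    // m["METHODS"]    == "COOKIE,SAFECOOKIE"
    // m["COOKIEFILE"] == "/home/x/.tor/control_auth_cookie"
    //   (surrounding quotes are stripped; backslash escapes are left as-is,
    //    per the TODO in ParseTorReplyMapping)
}
// ---------------------------------------------------------------------------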
-#include "txdb.h" - -#include "chainparams.h" -#include "hash.h" -#include "init.h" -#include "pow.h" -#include "random.h" -#include "ui_interface.h" -#include "uint256.h" -#include "util.h" +#include + +#include +#include +#include +#include +#include +#include +#include +#include #include #include static const char DB_COIN = 'C'; static const char DB_COINS = 'c'; static const char DB_BLOCK_FILES = 'f'; static const char DB_TXINDEX = 't'; static const char DB_BLOCK_INDEX = 'b'; static const char DB_BEST_BLOCK = 'B'; static const char DB_HEAD_BLOCKS = 'H'; static const char DB_FLAG = 'F'; static const char DB_REINDEX_FLAG = 'R'; static const char DB_LAST_BLOCK = 'l'; namespace { struct CoinEntry { COutPoint *outpoint; char key; explicit CoinEntry(const COutPoint *ptr) : outpoint(const_cast(ptr)), key(DB_COIN) {} template void Serialize(Stream &s) const { s << key; s << outpoint->GetTxId(); s << VARINT(outpoint->GetN()); } template void Unserialize(Stream &s) { s >> key; uint256 id; s >> id; uint32_t n = 0; s >> VARINT(n); *outpoint = COutPoint(id, n); } }; } // namespace CCoinsViewDB::CCoinsViewDB(size_t nCacheSize, bool fMemory, bool fWipe) : db(GetDataDir() / "chainstate", nCacheSize, fMemory, fWipe, true) {} bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { return db.Read(CoinEntry(&outpoint), coin); } bool CCoinsViewDB::HaveCoin(const COutPoint &outpoint) const { return db.Exists(CoinEntry(&outpoint)); } uint256 CCoinsViewDB::GetBestBlock() const { uint256 hashBestChain; if (!db.Read(DB_BEST_BLOCK, hashBestChain)) return uint256(); return hashBestChain; } std::vector CCoinsViewDB::GetHeadBlocks() const { std::vector vhashHeadBlocks; if (!db.Read(DB_HEAD_BLOCKS, vhashHeadBlocks)) { return std::vector(); } return vhashHeadBlocks; } bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { CDBBatch batch(db); size_t count = 0; size_t changed = 0; size_t batch_size = (size_t)gArgs.GetArg("-dbbatchsize", nDefaultDbBatchSize); int crash_simulate = gArgs.GetArg("-dbcrashratio", 0); assert(!hashBlock.IsNull()); uint256 old_tip = GetBestBlock(); if (old_tip.IsNull()) { // We may be in the middle of replaying. std::vector old_heads = GetHeadBlocks(); if (old_heads.size() == 2) { assert(old_heads[0] == hashBlock); old_tip = old_heads[1]; } } // In the first batch, mark the database as being in the middle of a // transition from old_tip to hashBlock. // A vector is used for future extensibility, as we may want to support // interrupting after partial writes from multiple independent reorgs. batch.Erase(DB_BEST_BLOCK); batch.Write(DB_HEAD_BLOCKS, std::vector{hashBlock, old_tip}); for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { if (it->second.flags & CCoinsCacheEntry::DIRTY) { CoinEntry entry(&it->first); if (it->second.coin.IsSpent()) { batch.Erase(entry); } else { batch.Write(entry, it->second.coin); } changed++; } count++; CCoinsMap::iterator itOld = it++; mapCoins.erase(itOld); if (batch.SizeEstimate() > batch_size) { LogPrint(BCLog::COINDB, "Writing partial batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); db.WriteBatch(batch); batch.Clear(); if (crash_simulate) { static FastRandomContext rng; if (rng.randrange(crash_simulate) == 0) { LogPrintf("Simulating a crash. Goodbye.\n"); _Exit(0); } } } } // In the last batch, mark the database as consistent with hashBlock again. 
batch.Erase(DB_HEAD_BLOCKS); batch.Write(DB_BEST_BLOCK, hashBlock); LogPrint(BCLog::COINDB, "Writing final batch of %.2f MiB\n", batch.SizeEstimate() * (1.0 / 1048576.0)); bool ret = db.WriteBatch(batch); LogPrint(BCLog::COINDB, "Committed %u changed transaction outputs (out of " "%u) to coin database...\n", (unsigned int)changed, (unsigned int)count); return ret; } size_t CCoinsViewDB::EstimateSize() const { return db.EstimateSize(DB_COIN, char(DB_COIN + 1)); } CBlockTreeDB::CBlockTreeDB(size_t nCacheSize, bool fMemory, bool fWipe) : CDBWrapper(GetDataDir() / "blocks" / "index", nCacheSize, fMemory, fWipe) {} bool CBlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo &info) { return Read(std::make_pair(DB_BLOCK_FILES, nFile), info); } bool CBlockTreeDB::WriteReindexing(bool fReindexing) { if (fReindexing) return Write(DB_REINDEX_FLAG, '1'); else return Erase(DB_REINDEX_FLAG); } bool CBlockTreeDB::ReadReindexing(bool &fReindexing) { fReindexing = Exists(DB_REINDEX_FLAG); return true; } bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { return Read(DB_LAST_BLOCK, nFile); } CCoinsViewCursor *CCoinsViewDB::Cursor() const { CCoinsViewDBCursor *i = new CCoinsViewDBCursor( const_cast(db).NewIterator(), GetBestBlock()); /** * It seems that there are no "const iterators" for LevelDB. Since we only * need read operations on it, use a const-cast to get around that * restriction. */ i->pcursor->Seek(DB_COIN); // Cache key of first record if (i->pcursor->Valid()) { CoinEntry entry(&i->keyTmp.second); i->pcursor->GetKey(entry); i->keyTmp.first = entry.key; } else { // Make sure Valid() and GetKey() return false i->keyTmp.first = 0; } return i; } bool CCoinsViewDBCursor::GetKey(COutPoint &key) const { // Return cached key if (keyTmp.first == DB_COIN) { key = keyTmp.second; return true; } return false; } bool CCoinsViewDBCursor::GetValue(Coin &coin) const { return pcursor->GetValue(coin); } unsigned int CCoinsViewDBCursor::GetValueSize() const { return pcursor->GetValueSize(); } bool CCoinsViewDBCursor::Valid() const { return keyTmp.first == DB_COIN; } void CCoinsViewDBCursor::Next() { pcursor->Next(); CoinEntry entry(&keyTmp.second); if (!pcursor->Valid() || !pcursor->GetKey(entry)) { // Invalidate cached key after last record so that Valid() and GetKey() // return false keyTmp.first = 0; } else { keyTmp.first = entry.key; } } bool CBlockTreeDB::WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo) { CDBBatch batch(*this); for (std::vector>::const_iterator it = fileInfo.begin(); it != fileInfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_FILES, it->first), *it->second); } batch.Write(DB_LAST_BLOCK, nLastFile); for (std::vector::const_iterator it = blockinfo.begin(); it != blockinfo.end(); it++) { batch.Write(std::make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()), CDiskBlockIndex(*it)); } return WriteBatch(batch, true); } bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) { return Read(std::make_pair(DB_TXINDEX, txid), pos); } bool CBlockTreeDB::WriteTxIndex( const std::vector> &vect) { CDBBatch batch(*this); for (std::vector>::const_iterator it = vect.begin(); it != vect.end(); it++) batch.Write(std::make_pair(DB_TXINDEX, it->first), it->second); return WriteBatch(batch); } bool CBlockTreeDB::WriteFlag(const std::string &name, bool fValue) { return Write(std::make_pair(DB_FLAG, name), fValue ? 
'1' : '0'); } bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) { char ch; if (!Read(std::make_pair(DB_FLAG, name), ch)) return false; fValue = ch == '1'; return true; } bool CBlockTreeDB::LoadBlockIndexGuts( const Config &config, std::function insertBlockIndex) { std::unique_ptr pcursor(NewIterator()); pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256())); // Load mapBlockIndex while (pcursor->Valid()) { boost::this_thread::interruption_point(); std::pair key; if (!pcursor->GetKey(key) || key.first != DB_BLOCK_INDEX) { break; } CDiskBlockIndex diskindex; if (!pcursor->GetValue(diskindex)) { return error("LoadBlockIndex() : failed to read value"); } // Construct block index object CBlockIndex *pindexNew = insertBlockIndex(diskindex.GetBlockHash()); pindexNew->pprev = insertBlockIndex(diskindex.hashPrev); pindexNew->nHeight = diskindex.nHeight; pindexNew->nFile = diskindex.nFile; pindexNew->nDataPos = diskindex.nDataPos; pindexNew->nUndoPos = diskindex.nUndoPos; pindexNew->nVersion = diskindex.nVersion; pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; pindexNew->nTime = diskindex.nTime; pindexNew->nBits = diskindex.nBits; pindexNew->nNonce = diskindex.nNonce; pindexNew->nStatus = diskindex.nStatus; pindexNew->nTx = diskindex.nTx; if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, config)) { return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString()); } pcursor->Next(); } return true; } namespace { //! Legacy class to deserialize pre-pertxout database entries without reindex. class CCoins { public: //! whether transaction is a coinbase bool fCoinBase; //! unspent transaction outputs; spent outputs are .IsNull(); spent outputs //! at the end of the array are dropped std::vector vout; //! at which height this transaction was included in the active block chain int nHeight; //! empty constructor CCoins() : fCoinBase(false), vout(0), nHeight(0) {} template void Unserialize(Stream &s) { uint32_t nCode = 0; // version int nVersionDummy; ::Unserialize(s, VARINT(nVersionDummy)); // header code ::Unserialize(s, VARINT(nCode)); fCoinBase = nCode & 1; std::vector vAvail(2, false); vAvail[0] = (nCode & 2) != 0; vAvail[1] = (nCode & 4) != 0; uint32_t nMaskCode = (nCode / 8) + ((nCode & 6) != 0 ? 0 : 1); // spentness bitmask while (nMaskCode > 0) { uint8_t chAvail = 0; ::Unserialize(s, chAvail); for (unsigned int p = 0; p < 8; p++) { bool f = (chAvail & (1 << p)) != 0; vAvail.push_back(f); } if (chAvail != 0) { nMaskCode--; } } // txouts themself vout.assign(vAvail.size(), CTxOut()); for (size_t i = 0; i < vAvail.size(); i++) { if (vAvail[i]) { ::Unserialize(s, REF(CTxOutCompressor(vout[i]))); } } // coinbase height ::Unserialize(s, VARINT(nHeight)); } }; } // namespace /** * Upgrade the database from older formats. * * Currently implemented: from the per-tx utxo model (0.8..0.14.x) to per-txout. 
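 *
 * The conversion walks every legacy DB_COINS ('c') record, writes one
 * per-txout DB_COIN ('C') entry for each output that is neither spent nor
 * provably unspendable, erases the legacy record, and flushes/compacts the
 * batch once it grows past roughly 16 MiB (1 << 24 bytes). Progress is
 * reported through uiInterface, and the upgrade aborts cleanly if a shutdown
 * is requested.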
*/ bool CCoinsViewDB::Upgrade() { std::unique_ptr pcursor(db.NewIterator()); pcursor->Seek(std::make_pair(DB_COINS, uint256())); if (!pcursor->Valid()) { return true; } int64_t count = 0; LogPrintf("Upgrading utxo-set database...\n"); LogPrintf("[0%%]..."); uiInterface.ShowProgress(_("Upgrading UTXO database"), 0, true); size_t batch_size = 1 << 24; CDBBatch batch(db); int reportDone = 0; std::pair key; std::pair prev_key = {DB_COINS, uint256()}; while (pcursor->Valid()) { boost::this_thread::interruption_point(); if (ShutdownRequested()) { break; } if (!pcursor->GetKey(key) || key.first != DB_COINS) { break; } if (count++ % 256 == 0) { uint32_t high = 0x100 * *key.second.begin() + *(key.second.begin() + 1); int percentageDone = (int)(high * 100.0 / 65536.0 + 0.5); uiInterface.ShowProgress(_("Upgrading UTXO database"), percentageDone, true); if (reportDone < percentageDone / 10) { // report max. every 10% step LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone / 10; } } CCoins old_coins; if (!pcursor->GetValue(old_coins)) { return error("%s: cannot parse CCoins record", __func__); } TxId id(key.second); for (size_t i = 0; i < old_coins.vout.size(); ++i) { if (!old_coins.vout[i].IsNull() && !old_coins.vout[i].scriptPubKey.IsUnspendable()) { Coin newcoin(std::move(old_coins.vout[i]), old_coins.nHeight, old_coins.fCoinBase); COutPoint outpoint(id, i); CoinEntry entry(&outpoint); batch.Write(entry, newcoin); } } batch.Erase(key); if (batch.SizeEstimate() > batch_size) { db.WriteBatch(batch); batch.Clear(); db.CompactRange(prev_key, key); prev_key = key; } pcursor->Next(); } db.WriteBatch(batch); db.CompactRange({DB_COINS, uint256()}, key); uiInterface.ShowProgress("", 100, false); LogPrintf("[%s].\n", ShutdownRequested() ? "CANCELLED" : "DONE"); return !ShutdownRequested(); } diff --git a/src/txdb.h b/src/txdb.h index 94a12b368..d71a4b14a 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -1,137 +1,137 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_TXDB_H #define BITCOIN_TXDB_H -#include "blockfileinfo.h" -#include "chain.h" -#include "coins.h" -#include "dbwrapper.h" -#include "diskblockpos.h" +#include +#include +#include +#include +#include #include #include #include #include class CBlockIndex; class CCoinsViewDBCursor; class uint256; class Config; //! No need to periodic flush if at least this much space still available. static constexpr int MAX_BLOCK_COINSDB_USAGE = 10; //! -dbcache default (MiB) static const int64_t nDefaultDbCache = 450; //! -dbbatchsize default (bytes) static const int64_t nDefaultDbBatchSize = 16 << 20; //! max. -dbcache (MiB) static const int64_t nMaxDbCache = sizeof(void *) > 4 ? 16384 : 1024; //! min. -dbcache (MiB) static const int64_t nMinDbCache = 4; //! Max memory allocated to block tree DB specific cache, if no -txindex (MiB) static const int64_t nMaxBlockDBCache = 2; //! Max memory allocated to block tree DB specific cache, if -txindex (MiB) // Unlike for the UTXO database, for the txindex scenario the leveldb cache make // a meaningful difference: // https://github.com/bitcoin/bitcoin/pull/8273#issuecomment-229601991 static const int64_t nMaxBlockDBAndTxIndexCache = 1024; //! 
Max memory allocated to coin DB specific cache (MiB) static const int64_t nMaxCoinsDBCache = 8; struct CDiskTxPos : public CDiskBlockPos { unsigned int nTxOffset; // after header ADD_SERIALIZE_METHODS; template inline void SerializationOp(Stream &s, Operation ser_action) { READWRITE(*static_cast(this)); READWRITE(VARINT(nTxOffset)); } CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {} CDiskTxPos() { SetNull(); } void SetNull() { CDiskBlockPos::SetNull(); nTxOffset = 0; } }; /** CCoinsView backed by the coin database (chainstate/) */ class CCoinsViewDB final : public CCoinsView { protected: CDBWrapper db; public: explicit CCoinsViewDB(size_t nCacheSize, bool fMemory = false, bool fWipe = false); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; bool HaveCoin(const COutPoint &outpoint) const override; uint256 GetBestBlock() const override; std::vector GetHeadBlocks() const override; bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; CCoinsViewCursor *Cursor() const override; //! Attempt to update from an older database format. //! Returns whether an error occurred. bool Upgrade(); size_t EstimateSize() const override; }; /** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */ class CCoinsViewDBCursor : public CCoinsViewCursor { public: ~CCoinsViewDBCursor() {} bool GetKey(COutPoint &key) const override; bool GetValue(Coin &coin) const override; unsigned int GetValueSize() const override; bool Valid() const override; void Next() override; private: CCoinsViewDBCursor(CDBIterator *pcursorIn, const uint256 &hashBlockIn) : CCoinsViewCursor(hashBlockIn), pcursor(pcursorIn) {} std::unique_ptr pcursor; std::pair keyTmp; friend class CCoinsViewDB; }; /** Access to the block database (blocks/index/) */ class CBlockTreeDB : public CDBWrapper { public: explicit CBlockTreeDB(size_t nCacheSize, bool fMemory = false, bool fWipe = false); private: CBlockTreeDB(const CBlockTreeDB &); void operator=(const CBlockTreeDB &); public: bool WriteBatchSync( const std::vector> &fileInfo, int nLastFile, const std::vector &blockinfo); bool ReadBlockFileInfo(int nFile, CBlockFileInfo &info); bool ReadLastBlockFile(int &nFile); bool WriteReindexing(bool fReindexing); bool ReadReindexing(bool &fReindexing); bool ReadTxIndex(const uint256 &txid, CDiskTxPos &pos); bool WriteTxIndex(const std::vector> &vect); bool WriteFlag(const std::string &name, bool fValue); bool ReadFlag(const std::string &name, bool &fValue); bool LoadBlockIndexGuts( const Config &config, std::function insertBlockIndex); }; #endif // BITCOIN_TXDB_H
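// ---------------------------------------------------------------------------
// Minimal usage sketch: walking the chainstate through the
// CCoinsViewDB/CCoinsViewDBCursor interface declared above. It assumes the
// usual node initialization has already selected a datadir and chain params;
// the 8 MiB cache size and the helper name CountUnspentCoins are arbitrary
// choices for illustration, not part of the interface.
// ---------------------------------------------------------------------------
#include <coins.h>
#include <txdb.h>

#include <memory>

static size_t CountUnspentCoins() {
    // Open the existing chainstate with an 8 MiB cache; do not wipe it.
    CCoinsViewDB view(8 << 20, /* fMemory */ false, /* fWipe */ false);

    std::unique_ptr<CCoinsViewCursor> cursor(view.Cursor());
    size_t count = 0;
    while (cursor->Valid()) {
        COutPoint key;
        Coin coin;
        // GetKey()/GetValue() yield the per-txout ('C') records written by
        // CCoinsViewDB::BatchWrite().
        if (cursor->GetKey(key) && cursor->GetValue(coin) && !coin.IsSpent()) {
            ++count;
        }
        cursor->Next();
    }
    return count;
}
// ---------------------------------------------------------------------------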