diff --git a/src/cuckoocache.h b/src/cuckoocache.h index 9bdf1d194..8ce1800dc 100644 --- a/src/cuckoocache.h +++ b/src/cuckoocache.h @@ -1,576 +1,577 @@ // Copyright (c) 2016 Jeremy Rubin // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_CUCKOOCACHE_H #define BITCOIN_CUCKOOCACHE_H -#include +#include // std::find #include #include #include #include #include +#include #include /** * High-performance cache primitives. * * Summary: * * 1. @ref bit_packed_atomic_flags is bit-packed atomic flags for garbage * collection * * 2. @ref cache is a cache which is performant in memory usage and lookup * speed. It is lockfree for erase operations. Elements are lazily erased on the * next insert. */ namespace CuckooCache { /** * @ref bit_packed_atomic_flags implements a container for garbage collection * flags that is only thread unsafe on calls to setup. This class bit-packs * collection flags for memory efficiency. * * All operations are `std::memory_order_relaxed` so external mechanisms must * ensure that writes and reads are properly synchronized. * * On setup(n), all bits up to `n` are marked as collected. * * Under the hood, because it is an 8-bit type, it makes sense to use a multiple * of 8 for setup, but it will be safe if that is not the case as well. */ class bit_packed_atomic_flags { std::unique_ptr[]> mem; public: /** No default constructor, as there must be some size. */ bit_packed_atomic_flags() = delete; /** * bit_packed_atomic_flags constructor creates memory to sufficiently * keep track of garbage collection information for `size` entries. * * @param size the number of elements to allocate space for * * @post bit_set, bit_unset, and bit_is_set function properly forall x. x < * size * @post All calls to bit_is_set (without subsequent bit_unset) will return * true. */ explicit bit_packed_atomic_flags(uint32_t size) { // pad out the size if needed size = (size + 7) / 8; mem.reset(new std::atomic[size]); for (uint32_t i = 0; i < size; ++i) { mem[i].store(0xFF); } }; /** * setup marks all entries and ensures that bit_packed_atomic_flags can * store at least `b` entries. * * @param b the number of elements to allocate space for * @post bit_set, bit_unset, and bit_is_set function properly forall x. x < * b * @post All calls to bit_is_set (without subsequent bit_unset) will return * true. */ inline void setup(uint32_t b) { bit_packed_atomic_flags d(b); std::swap(mem, d.mem); } /** * bit_set sets an entry as discardable. * * @param s the index of the entry to bit_set * @post immediately subsequent call (assuming proper external memory * ordering) to bit_is_set(s) == true. */ inline void bit_set(uint32_t s) { mem[s >> 3].fetch_or(1 << (s & 7), std::memory_order_relaxed); } /** * bit_unset marks an entry as something that should not be overwritten. * * @param s the index of the entry to bit_unset * @post immediately subsequent call (assuming proper external memory * ordering) to bit_is_set(s) == false. */ inline void bit_unset(uint32_t s) { mem[s >> 3].fetch_and(~(1 << (s & 7)), std::memory_order_relaxed); } /** * bit_is_set queries the table for discardability at `s`. * * @param s the index of the entry to read * @returns true if the bit at index `s` was set, false otherwise * */ inline bool bit_is_set(uint32_t s) const { return (1 << (s & 7)) & mem[s >> 3].load(std::memory_order_relaxed); } }; /** * @ref cache implements a cache with properties similar to a cuckoo-set. 
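As a side note for readers of the class above: the flag addressing is simply "byte s >> 3, bit s & 7" with relaxed atomics. A minimal standalone sketch of that scheme (an editor's illustration, not part of this diff):

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <memory>

    int main() {
        uint32_t size = 20;              // number of flags wanted
        uint32_t bytes = (size + 7) / 8; // round up to whole bytes
        std::unique_ptr<std::atomic<uint8_t>[]> mem(
            new std::atomic<uint8_t>[bytes]);
        // setup(): everything starts out marked collected (all bits set).
        for (uint32_t i = 0; i < bytes; ++i) {
            mem[i].store(0xFF);
        }
        uint32_t s = 13; // some flag index < size
        // bit_unset(s): keep entry s.
        mem[s >> 3].fetch_and(uint8_t(~(1 << (s & 7))), std::memory_order_relaxed);
        assert(!(mem[s >> 3].load(std::memory_order_relaxed) & (1 << (s & 7))));
        // bit_set(s): allow entry s to be discarded again.
        mem[s >> 3].fetch_or(uint8_t(1 << (s & 7)), std::memory_order_relaxed);
        assert(mem[s >> 3].load(std::memory_order_relaxed) & (1 << (s & 7)));
    }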
* * The cache is able to hold up to `(~(uint32_t)0) - 1` elements. * * Read Operations: * - contains() for `erase=false` * - get() for `erase=false` * * Read+Erase Operations: * - contains() for `erase=true` * - get() for `erase=true` * * Erase Operations: * - allow_erase() * * Write Operations: * - setup() * - setup_bytes() * - insert() * - please_keep() * * Synchronization Free Operations: * - invalid() * - compute_hashes() * * User Must Guarantee: * * 1. Write requires synchronized access (e.g. a lock) * 2. Read requires no concurrent Write, synchronized with last insert. * 3. Erase requires no concurrent Write, synchronized with last insert. * 4. An Erase caller must release all memory before allowing a new Writer. * * * Note on function names: * - The name "allow_erase" is used because the real discard happens later. * - The name "please_keep" is used because elements may be erased anyways on * insert. * * @tparam Element should be a movable and copyable type * @tparam Hash should be a function/callable which takes a template parameter * hash_select and an Element and extracts a hash from it. Should return * high-entropy uint32_t hashes for `Hash h; h<0>(e) ... h<7>(e)`. */ template class cache { private: /** table stores all the elements */ std::vector table; /** size stores the total available slots in the hash table */ uint32_t size; /** * The bit_packed_atomic_flags array is marked mutable because we want * garbage collection to be allowed to occur from const methods. */ mutable bit_packed_atomic_flags collection_flags; /** * epoch_flags tracks how recently an element was inserted into the cache. * true denotes recent, false denotes not-recent. See insert() method for * full semantics. */ mutable std::vector epoch_flags; /** * epoch_heuristic_counter is used to determine when an epoch might be aged * & an expensive scan should be done. epoch_heuristic_counter is * decremented on insert and reset to the new number of inserts which would * cause the epoch to reach epoch_size when it reaches zero. */ uint32_t epoch_heuristic_counter; /** * epoch_size is set to be the number of elements supposed to be in a epoch. * When the number of non-erased elements in an epoch exceeds epoch_size, a * new epoch should be started and all current entries demoted. epoch_size * is set to be 45% of size because we want to keep load around 90%, and we * support 3 epochs at once -- one "dead" which has been erased, one "dying" * which has been marked to be erased next, and one "living" which new * inserts add to. */ uint32_t epoch_size; /** * depth_limit determines how many elements insert should try to replace. * Should be set to log2(n). */ uint8_t depth_limit; /** * hash_function is a const instance of the hash function. It cannot be * static or initialized at call time as it may have internal state (such as * a nonce). */ const Hash hash_function; /** * Key is the key type for this map or set. */ using Key = typename Element::KeyType; /** * compute_hashes is convenience for not having to write out this expression * everywhere we use the hash values of an Element. * * We need to map the 32-bit input hash onto a hash bucket in a range [0, * size) in a manner which preserves as much of the hash's uniformity as * possible. Ideally this would be done by bitmasking but the size is * usually not a power of two. * * The naive approach would be to use a mod -- which isn't perfectly uniform * but so long as the hash is much larger than size it is not that bad. 
* Unfortunately, mod/division is fairly slow on ordinary microprocessors * (e.g. 90-ish cycles on haswell, ARM doesn't even have an instruction for * it.); when the divisor is a constant the compiler will do clever tricks * to turn it into a multiply+add+shift, but size is a run-time value so the * compiler can't do that here. * * One option would be to implement the same trick the compiler uses and * compute the constants for exact division based on the size, as described * in "{N}-bit Unsigned Division via {N}-bit Multiply-Add" by Arch D. * Robison in 2005. But that code is somewhat complicated and the result is * still slower than other options: * * Instead we treat the 32-bit random number as a Q32 fixed-point number in * the range [0, 1) and simply multiply it by the size. Then we just shift * the result down by 32-bits to get our bucket number. The result has * non-uniformity the same as a mod, but it is much faster to compute. More * about this technique can be found at * http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ * . * * The resulting non-uniformity is also more equally distributed which would * be advantageous for something like linear probing, though it shouldn't * matter one way or the other for a cuckoo table. * * The primary disadvantage of this approach is increased intermediate * precision is required but for a 32-bit random number we only need the * high 32 bits of a 32*32->64 multiply, which means the operation is * reasonably fast even on a typical 32-bit processor. * * @param e The element whose hashes will be returned * @returns Deterministic hashes derived from `e` uniformly mapped onto the * range [0, size) */ inline std::array compute_hashes(const Key &k) const { return {{uint32_t(uint64_t(hash_function.template operator()<0>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<1>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<2>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<3>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<4>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<5>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<6>(k)) * uint64_t(size) >> 32), uint32_t(uint64_t(hash_function.template operator()<7>(k)) * uint64_t(size) >> 32)}}; } /** * invalid returns a special index that can never be inserted to * @returns the special constexpr index that can never be inserted to */ constexpr uint32_t invalid() const { return ~uint32_t(0); } /** * allow_erase marks the element at index `n` as discardable. Threadsafe * without any concurrent insert. * @param n the index to allow erasure of */ inline void allow_erase(uint32_t n) const { collection_flags.bit_set(n); } /** * please_keep marks the element at index `n` as an entry that should be * kept. Threadsafe without any concurrent insert. * @param n the index to prioritize keeping */ inline void please_keep(uint32_t n) const { collection_flags.bit_unset(n); } /** * epoch_check handles the changing of epochs for elements stored in the * cache. epoch_check should be run before every insert. * * First, epoch_check decrements and checks the cheap heuristic, and then * does a more expensive scan if the cheap heuristic runs out. If the * expensive scan succeeds, the epochs are aged and old elements are * allow_erased. 
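The multiply-and-shift reduction described above is worth seeing in isolation. A small sketch (an editor's illustration, not part of this diff) contrasting it with the modulo mapping it replaces; both map a 32-bit hash onto [0, size), but the multiply version needs only the high half of a 32x32->64 multiply:

    #include <cstdint>
    #include <cstdio>

    // Treat the 32-bit hash as a Q32 fixed-point fraction in [0, 1) and scale
    // it by size; the high 32 bits of the product are the bucket index.
    static inline uint32_t fast_range(uint32_t hash, uint32_t size) {
        return uint32_t((uint64_t(hash) * uint64_t(size)) >> 32);
    }

    int main() {
        uint32_t size = 1000003;  // a non-power-of-two table size
        uint32_t h = 0x9E3779B9u; // some 32-bit hash value
        std::printf("multiply-shift bucket: %u\n", fast_range(h, size));
        std::printf("modulo bucket:         %u\n", h % size); // slower, similar bias
    }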
The cheap heuristic is reset to retrigger after the worst
     * case growth of the current epoch's elements would exceed the epoch_size.
     */
    void epoch_check() {
        if (epoch_heuristic_counter != 0) {
            --epoch_heuristic_counter;
            return;
        }
        // count the number of elements from the latest epoch which have not
        // been erased.
        uint32_t epoch_unused_count = 0;
        for (uint32_t i = 0; i < size; ++i) {
            epoch_unused_count +=
                epoch_flags[i] && !collection_flags.bit_is_set(i);
        }
        // If there are more non-deleted entries in the current epoch than the
        // epoch size, then allow_erase on all elements in the old epoch (marked
        // false) and move all elements in the current epoch to the old epoch
        // but do not call allow_erase on their indices.
        if (epoch_unused_count >= epoch_size) {
            for (uint32_t i = 0; i < size; ++i) {
                if (epoch_flags[i]) {
                    epoch_flags[i] = false;
                } else {
                    allow_erase(i);
                }
            }
            epoch_heuristic_counter = epoch_size;
        } else {
            // reset the epoch_heuristic_counter to next do a scan when worst
            // case behavior (no intermittent erases) would exceed epoch size,
            // with a reasonable minimum scan size. Ordinarily, we would have to
            // sanity check std::min(epoch_size, epoch_unused_count), but we
            // already know that `epoch_unused_count < epoch_size` in this
            // branch
            epoch_heuristic_counter = std::max(
                1u, std::max(epoch_size / 16, epoch_size - epoch_unused_count));
        }
    }

public:
    /**
     * You must always construct a cache with some elements via a subsequent
     * call to setup or setup_bytes, otherwise operations may segfault.
     */
    cache()
        : table(), size(), collection_flags(0), epoch_flags(),
          epoch_heuristic_counter(), epoch_size(), depth_limit(0),
          hash_function() {}

    /**
     * setup initializes the container to store no more than new_size
     * elements.
     *
     * setup should only be called once.
     *
     * @param new_size the desired number of elements to store
     * @returns the maximum number of elements storable
     */
    uint32_t setup(uint32_t new_size) {
        // depth_limit must be at least one otherwise errors can occur.
        depth_limit = static_cast(
            std::log2(static_cast(std::max((uint32_t)2, new_size))));
        size = std::max(2, new_size);
        table.resize(size);
        collection_flags.setup(size);
        epoch_flags.resize(size);
        // Set to 45% as described above
        epoch_size = std::max((uint32_t)1, (45 * size) / 100);
        // Initially set to wait for a whole epoch
        epoch_heuristic_counter = epoch_size;
        return size;
    }

    /**
     * setup_bytes is a convenience function which accounts for internal memory
     * usage when deciding how many elements to store. It isn't perfect because
     * it doesn't account for any overhead (struct size, MallocUsage, collection
     * and epoch flags). This was done to simplify selecting a power of two
     * size. In the expected use case, an extra two bits per entry should be
     * negligible compared to the size of the elements.
     *
     * @param bytes the approximate number of bytes to use for this data
     * structure
     * @returns the maximum number of elements storable (see setup()
     * documentation for more detail)
     */
    uint32_t setup_bytes(size_t bytes) { return setup(bytes / sizeof(Element)); }

    /**
     * insert loops at most depth_limit times trying to insert a hash at various
     * locations in the table via a variant of the Cuckoo Algorithm with eight
     * hash locations.
     *
     * It drops the last tried element if it runs out of depth before
     * encountering an open slot.
     *
     * Thus:
     *
     * ```
     * insert(x);
     * return contains(x, false);
     * ```
     *
     * is not guaranteed to return true.
     *
     * @param e the element to insert
     * @param replace whether to replace if an existing element with the same
     * key is found.
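Before the post-conditions of insert() below, a quick worked example of the sizing arithmetic in setup() and setup_bytes() above, using hypothetical numbers (an editor's illustration, not part of this diff): a 32 MiB budget with 32-byte elements yields 2^20 slots, a depth limit of 20, and an epoch of 471,859 entries.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
        size_t bytes = 32 << 20; // hypothetical byte budget given to setup_bytes()
        size_t elem_size = 32;   // hypothetical sizeof(Element)
        uint32_t size = uint32_t(bytes / elem_size);                    // 1048576
        uint32_t depth_limit =
            uint32_t(std::log2(double(std::max<uint32_t>(2, size))));  // 20
        uint32_t epoch_size = std::max<uint32_t>(1, (45 * size) / 100); // 471859
        std::printf("slots=%u depth_limit=%u epoch_size=%u\n", size, depth_limit,
                    epoch_size);
    }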
* @post one of the following: All previously inserted elements and e are * now in the table, one previously inserted element is evicted from the * table, the entry attempted to be inserted is evicted. If replace is true * and a matching element already exists, it is updated accordingly. */ inline void insert(Element e, bool replace = false) { epoch_check(); uint32_t last_loc = invalid(); bool last_epoch = true; std::array locs = compute_hashes(e.getKey()); // Make sure we have not already inserted this element. // If we have, make sure that it does not get deleted. for (const uint32_t loc : locs) { if (table[loc].getKey() == e.getKey()) { if (replace) { table[loc] = std::move(e); } please_keep(loc); epoch_flags[loc] = last_epoch; return; } } for (uint8_t depth = 0; depth < depth_limit; ++depth) { // First try to insert to an empty slot, if one exists for (const uint32_t loc : locs) { if (!collection_flags.bit_is_set(loc)) { continue; } table[loc] = std::move(e); please_keep(loc); epoch_flags[loc] = last_epoch; return; } /** * Swap with the element at the location that was not the last one * looked at. Example: * * 1. On first iteration, last_loc == invalid(), find returns last, * so last_loc defaults to locs[0]. * 2. On further iterations, where last_loc == locs[k], last_loc * will go to locs[k+1 % 8], i.e., next of the 8 indices wrapping * around to 0 if needed. * * This prevents moving the element we just put in. * * The swap is not a move -- we must switch onto the evicted element * for the next iteration. */ last_loc = locs[(1 + (std::find(locs.begin(), locs.end(), last_loc) - locs.begin())) & 7]; std::swap(table[last_loc], e); // Can't std::swap a std::vector::reference and a bool&. bool epoch = last_epoch; last_epoch = epoch_flags[last_loc]; epoch_flags[last_loc] = epoch; // Recompute the locs -- unfortunately happens one too many times! locs = compute_hashes(e.getKey()); } } /** * contains iterates through the hash locations for a given element and * checks to see if it is present. * * contains does not check garbage collected state (in other words, garbage * is only collected when the space is needed), so: * * ``` * insert(x); * if (contains(x, true)) * return contains(x, false); * else * return true; * ``` * * executed on a single thread will always return true! * * This is a great property for re-org performance for example. * * contains returns a bool set true if the element was found. * * @param k the key to check * @param erase whether to attempt setting the garbage collect flag * * @post if erase is true and the element is found, then the garbage collect * flag is set * @returns true if the element is found, false otherwise */ bool contains(const Key &k, const bool erase) const { return find(k, erase) != nullptr; } /** * get is almost identical to contains(), with the difference that it * obtains the found element (for Elements that contain key and value, * this has the effect of obtaining the found value). * * @param e the element to check * @param erase * * @post If the element is found, it is copied into e. If erase is true * and the element is found, then the garbage collect flag is set. 
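The index arithmetic in the eviction walk of insert() above is compact, so here is a short trace (an editor's illustration, not part of this diff) of which of the eight candidate slots gets swapped out next; note that on the very first iteration, when last_loc is still invalid(), the walk lands on locs[1]:

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Pretend these are the eight bucket indices computed for an element.
        std::array<uint32_t, 8> locs = {10, 20, 30, 40, 50, 60, 70, 80};
        const uint32_t invalid = ~uint32_t(0);

        auto next_loc = [&](uint32_t last_loc) {
            return locs[(1 + (std::find(locs.begin(), locs.end(), last_loc) -
                              locs.begin())) &
                        7];
        };

        std::printf("%u\n", next_loc(invalid)); // not found -> index 8 -> locs[1] = 20
        std::printf("%u\n", next_loc(40));      // locs[3] -> locs[4] = 50
        std::printf("%u\n", next_loc(80));      // locs[7] -> wraps to locs[0] = 10
    }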
* @returns true if the element is found, false otherwise */ bool get(Element &e, const bool erase) const { if (const Element *eptr = find(e.getKey(), erase)) { e = *eptr; return true; } return false; } private: const Element *find(const Key &k, const bool erase) const { std::array locs = compute_hashes(k); for (const uint32_t loc : locs) { if (table[loc].getKey() == k) { if (erase) { allow_erase(loc); } return &table[loc]; } } return nullptr; } }; /** * Helper class used when we only want the cache to be a set rather than a map. */ template struct KeyOnly : public T { // For contains. using KeyType = T; // Ensure implicit conversion from T. KeyOnly() = default; KeyOnly(const T &x) : T(x) {} // Implement required features. const T &getKey() const { return *this; } }; } // namespace CuckooCache #endif // BITCOIN_CUCKOOCACHE_H diff --git a/src/prevector.h b/src/prevector.h index 66c1d2878..b2a747a21 100644 --- a/src/prevector.h +++ b/src/prevector.h @@ -1,594 +1,595 @@ // Copyright (c) 2015-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_PREVECTOR_H #define BITCOIN_PREVECTOR_H #include #include #include #include #include #include #include +#include /** * Implements a drop-in replacement for std::vector which stores up to N * elements directly (without heap allocation). The types Size and Diff are used * to store element counts, and can be any unsigned + signed type. * * Storage layout is either: * - Direct allocation: * - Size _size: the number of used elements (between 0 and N) * - T direct[N]: an array of N elements of type T * (only the first _size are initialized). * - Indirect allocation: * - Size _size: the number of used elements plus N + 1 * - Size capacity: the number of allocated elements * - T* indirect: a pointer to an array of capacity elements of type T * (only the first _size are initialized). * * The data type T must be movable by memmove/realloc(). Once we switch to C++, * move constructors can be used instead. 
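With cuckoocache.h now complete, a set-style usage sketch via KeyOnly (an editor's illustration, not part of this diff; the key type, the toy 8-way hasher, and the uint8_t hash-index parameter are all assumptions here — the real consumer, the signature cache, uses uint256 keys with a salted hasher):

    #include <cuckoocache.h>

    #include <cstdint>
    #include <cstdio>

    struct ToyKey {
        uint32_t v{0};
        ToyKey() = default;
        ToyKey(uint32_t v_) : v(v_) {}
        bool operator==(const ToyKey &o) const { return v == o.v; }
    };

    struct ToyHasher {
        // The hash-index template parameter type (uint8_t) is an assumption.
        template <uint8_t hash_select>
        uint32_t operator()(const ToyKey &k) const {
            // Toy mixing only; real hashers must return high-entropy values.
            return (k.v + 0x9E3779B9u * (hash_select + 1)) * 2654435761u;
        }
    };

    int main() {
        CuckooCache::cache<CuckooCache::KeyOnly<ToyKey>, ToyHasher> set;
        set.setup_bytes(1 << 20); // ~1 MiB worth of ToyKey slots
        set.insert(ToyKey{42});   // Write: needs external synchronization if shared
        std::printf("%d\n", set.contains(ToyKey{42}, /* erase */ false)); // 1
        std::printf("%d\n", set.contains(ToyKey{42}, /* erase */ true));  // 1
    }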
*/ template class prevector { public: typedef Size size_type; typedef Diff difference_type; typedef T value_type; typedef value_type &reference; typedef const value_type &const_reference; typedef value_type *pointer; typedef const value_type *const_pointer; class iterator { T *ptr; public: typedef Diff difference_type; typedef T value_type; typedef T *pointer; typedef T &reference; typedef std::random_access_iterator_tag iterator_category; iterator() : ptr(nullptr) {} iterator(T *ptr_) : ptr(ptr_) {} T &operator*() const { return *ptr; } T *operator->() const { return ptr; } T &operator[](size_type pos) { return ptr[pos]; } const T &operator[](size_type pos) const { return ptr[pos]; } iterator &operator++() { ptr++; return *this; } iterator &operator--() { ptr--; return *this; } iterator operator++(int) { iterator copy(*this); ++(*this); return copy; } iterator operator--(int) { iterator copy(*this); --(*this); return copy; } difference_type friend operator-(iterator a, iterator b) { return (&(*a) - &(*b)); } iterator operator+(size_type n) { return iterator(ptr + n); } iterator &operator+=(size_type n) { ptr += n; return *this; } iterator operator-(size_type n) { return iterator(ptr - n); } iterator &operator-=(size_type n) { ptr -= n; return *this; } bool operator==(iterator x) const { return ptr == x.ptr; } bool operator!=(iterator x) const { return ptr != x.ptr; } bool operator>=(iterator x) const { return ptr >= x.ptr; } bool operator<=(iterator x) const { return ptr <= x.ptr; } bool operator>(iterator x) const { return ptr > x.ptr; } bool operator<(iterator x) const { return ptr < x.ptr; } }; class reverse_iterator { T *ptr; public: typedef Diff difference_type; typedef T value_type; typedef T *pointer; typedef T &reference; typedef std::bidirectional_iterator_tag iterator_category; reverse_iterator() : ptr(nullptr) {} reverse_iterator(T *ptr_) : ptr(ptr_) {} T &operator*() { return *ptr; } const T &operator*() const { return *ptr; } T *operator->() { return ptr; } const T *operator->() const { return ptr; } reverse_iterator &operator--() { ptr++; return *this; } reverse_iterator &operator++() { ptr--; return *this; } reverse_iterator operator++(int) { reverse_iterator copy(*this); ++(*this); return copy; } reverse_iterator operator--(int) { reverse_iterator copy(*this); --(*this); return copy; } bool operator==(reverse_iterator x) const { return ptr == x.ptr; } bool operator!=(reverse_iterator x) const { return ptr != x.ptr; } }; class const_iterator { const T *ptr; public: typedef Diff difference_type; typedef const T value_type; typedef const T *pointer; typedef const T &reference; typedef std::random_access_iterator_tag iterator_category; const_iterator() : ptr(nullptr) {} const_iterator(const T *ptr_) : ptr(ptr_) {} const_iterator(iterator x) : ptr(&(*x)) {} const T &operator*() const { return *ptr; } const T *operator->() const { return ptr; } const T &operator[](size_type pos) const { return ptr[pos]; } const_iterator &operator++() { ptr++; return *this; } const_iterator &operator--() { ptr--; return *this; } const_iterator operator++(int) { const_iterator copy(*this); ++(*this); return copy; } const_iterator operator--(int) { const_iterator copy(*this); --(*this); return copy; } difference_type friend operator-(const_iterator a, const_iterator b) { return (&(*a) - &(*b)); } const_iterator operator+(size_type n) { return const_iterator(ptr + n); } const_iterator &operator+=(size_type n) { ptr += n; return *this; } const_iterator operator-(size_type n) { return 
const_iterator(ptr - n); } const_iterator &operator-=(size_type n) { ptr -= n; return *this; } bool operator==(const_iterator x) const { return ptr == x.ptr; } bool operator!=(const_iterator x) const { return ptr != x.ptr; } bool operator>=(const_iterator x) const { return ptr >= x.ptr; } bool operator<=(const_iterator x) const { return ptr <= x.ptr; } bool operator>(const_iterator x) const { return ptr > x.ptr; } bool operator<(const_iterator x) const { return ptr < x.ptr; } }; class const_reverse_iterator { const T *ptr; public: typedef Diff difference_type; typedef const T value_type; typedef const T *pointer; typedef const T &reference; typedef std::bidirectional_iterator_tag iterator_category; const_reverse_iterator() : ptr(nullptr) {} const_reverse_iterator(const T *ptr_) : ptr(ptr_) {} const_reverse_iterator(reverse_iterator x) : ptr(&(*x)) {} const T &operator*() const { return *ptr; } const T *operator->() const { return ptr; } const_reverse_iterator &operator--() { ptr++; return *this; } const_reverse_iterator &operator++() { ptr--; return *this; } const_reverse_iterator operator++(int) { const_reverse_iterator copy(*this); ++(*this); return copy; } const_reverse_iterator operator--(int) { const_reverse_iterator copy(*this); --(*this); return copy; } bool operator==(const_reverse_iterator x) const { return ptr == x.ptr; } bool operator!=(const_reverse_iterator x) const { return ptr != x.ptr; } }; private: #pragma pack(push, 1) union direct_or_indirect { char direct[sizeof(T) * N]; struct { char *indirect; size_type capacity; }; }; #pragma pack(pop) alignas(char *) direct_or_indirect _union = {}; size_type _size = 0; static_assert(alignof(char *) % alignof(size_type) == 0 && sizeof(char *) % alignof(size_type) == 0, "size_type cannot have more restrictive alignment " "requirement than pointer"); static_assert(alignof(char *) % alignof(T) == 0, "value_type T cannot have more restrictive alignment " "requirement than pointer"); T *direct_ptr(difference_type pos) { return reinterpret_cast(_union.direct) + pos; } const T *direct_ptr(difference_type pos) const { return reinterpret_cast(_union.direct) + pos; } T *indirect_ptr(difference_type pos) { return reinterpret_cast(_union.indirect) + pos; } const T *indirect_ptr(difference_type pos) const { return reinterpret_cast(_union.indirect) + pos; } bool is_direct() const { return _size <= N; } void change_capacity(size_type new_capacity) { if (new_capacity <= N) { if (!is_direct()) { T *indirect = indirect_ptr(0); T *src = indirect; T *dst = direct_ptr(0); memcpy(dst, src, size() * sizeof(T)); free(indirect); _size -= N + 1; } } else { if (!is_direct()) { // FIXME: Because malloc/realloc here won't call new_handler if // allocation fails, assert success. These should instead use an // allocator or new/delete so that handlers are called as // necessary, but performance would be slightly degraded by // doing so. _union.indirect = static_cast(realloc( _union.indirect, ((size_t)sizeof(T)) * new_capacity)); assert(_union.indirect); _union.capacity = new_capacity; } else { char *new_indirect = static_cast( malloc(((size_t)sizeof(T)) * new_capacity)); assert(new_indirect); T *src = direct_ptr(0); T *dst = reinterpret_cast(new_indirect); memcpy(dst, src, size() * sizeof(T)); _union.indirect = new_indirect; _union.capacity = new_capacity; _size += N + 1; } } } T *item_ptr(difference_type pos) { return is_direct() ? direct_ptr(pos) : indirect_ptr(pos); } const T *item_ptr(difference_type pos) const { return is_direct() ? 
direct_ptr(pos) : indirect_ptr(pos); } void fill(T *dst, ptrdiff_t count, const T &value = T{}) { std::fill_n(dst, count, value); } template void fill(T *dst, InputIterator first, InputIterator last) { while (first != last) { new (static_cast(dst)) T(*first); ++dst; ++first; } } public: void assign(size_type n, const T &val) { clear(); if (capacity() < n) { change_capacity(n); } _size += n; fill(item_ptr(0), n, val); } template void assign(InputIterator first, InputIterator last) { size_type n = last - first; clear(); if (capacity() < n) { change_capacity(n); } _size += n; fill(item_ptr(0), first, last); } prevector() {} explicit prevector(size_type n) { resize(n); } explicit prevector(size_type n, const T &val) { change_capacity(n); _size += n; fill(item_ptr(0), n, val); } template prevector(InputIterator first, InputIterator last) { size_type n = last - first; change_capacity(n); _size += n; fill(item_ptr(0), first, last); } prevector(const prevector &other) { size_type n = other.size(); change_capacity(n); _size += n; fill(item_ptr(0), other.begin(), other.end()); } prevector(prevector &&other) { swap(other); } prevector &operator=(const prevector &other) { if (&other == this) { return *this; } assign(other.begin(), other.end()); return *this; } prevector &operator=(prevector &&other) { swap(other); return *this; } size_type size() const { return is_direct() ? _size : _size - N - 1; } bool empty() const { return size() == 0; } iterator begin() { return iterator(item_ptr(0)); } const_iterator begin() const { return const_iterator(item_ptr(0)); } iterator end() { return iterator(item_ptr(size())); } const_iterator end() const { return const_iterator(item_ptr(size())); } reverse_iterator rbegin() { return reverse_iterator(item_ptr(size() - 1)); } const_reverse_iterator rbegin() const { return const_reverse_iterator(item_ptr(size() - 1)); } reverse_iterator rend() { return reverse_iterator(item_ptr(-1)); } const_reverse_iterator rend() const { return const_reverse_iterator(item_ptr(-1)); } size_t capacity() const { if (is_direct()) { return N; } else { return _union.capacity; } } T &operator[](size_type pos) { return *item_ptr(pos); } const T &operator[](size_type pos) const { return *item_ptr(pos); } void resize(size_type new_size) { size_type cur_size = size(); if (cur_size == new_size) { return; } if (cur_size > new_size) { erase(item_ptr(new_size), end()); return; } if (new_size > capacity()) { change_capacity(new_size); } ptrdiff_t increase = new_size - cur_size; fill(item_ptr(cur_size), increase); _size += increase; } void reserve(size_type new_capacity) { if (new_capacity > capacity()) { change_capacity(new_capacity); } } void shrink_to_fit() { change_capacity(size()); } void clear() { resize(0); } iterator insert(iterator pos, const T &value) { size_type p = pos - begin(); size_type new_size = size() + 1; if (capacity() < new_size) { change_capacity(new_size + (new_size >> 1)); } T *ptr = item_ptr(p); memmove(ptr + 1, ptr, (size() - p) * sizeof(T)); _size++; new (static_cast(ptr)) T(value); return iterator(ptr); } void insert(iterator pos, size_type count, const T &value) { size_type p = pos - begin(); size_type new_size = size() + count; if (capacity() < new_size) { change_capacity(new_size + (new_size >> 1)); } T *ptr = item_ptr(p); memmove(ptr + count, ptr, (size() - p) * sizeof(T)); _size += count; fill(item_ptr(p), count, value); } template void insert(iterator pos, InputIterator first, InputIterator last) { size_type p = pos - begin(); difference_type count = last - first; 
size_type new_size = size() + count; if (capacity() < new_size) { change_capacity(new_size + (new_size >> 1)); } T *ptr = item_ptr(p); memmove(ptr + count, ptr, (size() - p) * sizeof(T)); _size += count; fill(ptr, first, last); } iterator erase(iterator pos) { return erase(pos, pos + 1); } iterator erase(iterator first, iterator last) { // Erase is not allowed to the change the object's capacity. That means // that when starting with an indirectly allocated prevector with // size and capacity > N, the result may be a still indirectly allocated // prevector with size <= N and capacity > N. A shrink_to_fit() call is // necessary to switch to the (more efficient) directly allocated // representation (with capacity N and size <= N). iterator p = first; char *endp = (char *)&(*end()); if (!std::is_trivially_destructible::value) { while (p != last) { (*p).~T(); _size--; ++p; } } else { _size -= last - p; } memmove(&(*first), &(*last), endp - ((char *)(&(*last)))); return first; } void push_back(const T &value) { size_type new_size = size() + 1; if (capacity() < new_size) { change_capacity(new_size + (new_size >> 1)); } new (item_ptr(size())) T(value); _size++; } void pop_back() { erase(end() - 1, end()); } T &front() { return *item_ptr(0); } const T &front() const { return *item_ptr(0); } T &back() { return *item_ptr(size() - 1); } const T &back() const { return *item_ptr(size() - 1); } void swap(prevector &other) { std::swap(_union, other._union); std::swap(_size, other._size); } ~prevector() { if (!std::is_trivially_destructible::value) { clear(); } if (!is_direct()) { free(_union.indirect); _union.indirect = nullptr; } } bool operator==(const prevector &other) const { if (other.size() != size()) { return false; } const_iterator b1 = begin(); const_iterator b2 = other.begin(); const_iterator e1 = end(); while (b1 != e1) { if ((*b1) != (*b2)) { return false; } ++b1; ++b2; } return true; } bool operator!=(const prevector &other) const { return !(*this == other); } bool operator<(const prevector &other) const { if (size() < other.size()) { return true; } if (size() > other.size()) { return false; } const_iterator b1 = begin(); const_iterator b2 = other.begin(); const_iterator e1 = end(); while (b1 != e1) { if ((*b1) < (*b2)) { return true; } if ((*b2) < (*b1)) { return false; } ++b1; ++b2; } return false; } size_t allocated_memory() const { if (is_direct()) { return 0; } else { return ((size_t)(sizeof(T))) * _union.capacity; } } value_type *data() { return item_ptr(0); } const value_type *data() const { return item_ptr(0); } }; #endif // BITCOIN_PREVECTOR_H diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp index b1c9ec7a2..21a198670 100644 --- a/src/qt/bantablemodel.cpp +++ b/src/qt/bantablemodel.cpp @@ -1,167 +1,167 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
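As a quick illustration of the size/capacity encoding implemented above (an editor's sketch, not part of this diff, assuming the usual prevector<N, T> parameter order): elements live in the in-object buffer while _size <= N, the switch to heap storage is recorded by biasing _size by N + 1, and, as the erase() comment notes, only shrink_to_fit() ever switches back:

    #include <prevector.h>

    #include <cstdio>

    int main() {
        prevector<8, int> v; // up to 8 ints stored inline, no heap allocation
        for (int i = 0; i < 8; ++i) {
            v.push_back(i);
        }
        std::printf("%u %zu\n", unsigned(v.size()), v.allocated_memory()); // 8 0
        v.push_back(8); // ninth element forces the indirect (heap) representation
        std::printf("%u %zu\n", unsigned(v.size()), v.allocated_memory()); // 9, heap now in use
        v.resize(3);
        v.shrink_to_fit(); // erase()/resize() never shrink capacity; this does,
                           // and moves the elements back into the inline buffer
        std::printf("%u %zu\n", unsigned(v.size()), v.allocated_memory()); // 3 0
    }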
#include #include #include // For banmap_t #include -#include +#include #include #include bool BannedNodeLessThan::operator()(const CCombinedBan &left, const CCombinedBan &right) const { const CCombinedBan *pLeft = &left; const CCombinedBan *pRight = &right; if (order == Qt::DescendingOrder) { std::swap(pLeft, pRight); } switch (column) { case BanTableModel::Address: return pLeft->subnet.ToString().compare(pRight->subnet.ToString()) < 0; case BanTableModel::Bantime: return pLeft->banEntry.nBanUntil < pRight->banEntry.nBanUntil; } return false; } // private implementation class BanTablePriv { public: /** Local cache of peer information */ QList cachedBanlist; /** Column to sort nodes by (default to unsorted) */ int sortColumn{-1}; /** Order (ascending or descending) to sort nodes by */ Qt::SortOrder sortOrder; /** Pull a full list of banned nodes from CNode into our cache */ void refreshBanlist(interfaces::Node &node) { banmap_t banMap; node.getBanned(banMap); cachedBanlist.clear(); cachedBanlist.reserve(banMap.size()); for (const auto &entry : banMap) { CCombinedBan banEntry; banEntry.subnet = entry.first; banEntry.banEntry = entry.second; cachedBanlist.append(banEntry); } if (sortColumn >= 0) { // sort cachedBanlist (use stable sort to prevent rows jumping // around unnecessarily) std::stable_sort(cachedBanlist.begin(), cachedBanlist.end(), BannedNodeLessThan(sortColumn, sortOrder)); } } int size() const { return cachedBanlist.size(); } CCombinedBan *index(int idx) { if (idx >= 0 && idx < cachedBanlist.size()) { return &cachedBanlist[idx]; } return nullptr; } }; BanTableModel::BanTableModel(interfaces::Node &node, ClientModel *parent) : QAbstractTableModel(parent), m_node(node), clientModel(parent) { columns << tr("IP/Netmask") << tr("Banned Until"); priv.reset(new BanTablePriv()); // load initial data refresh(); } BanTableModel::~BanTableModel() { // Intentionally left empty } int BanTableModel::rowCount(const QModelIndex &parent) const { Q_UNUSED(parent); return priv->size(); } int BanTableModel::columnCount(const QModelIndex &parent) const { Q_UNUSED(parent); return columns.length(); } QVariant BanTableModel::data(const QModelIndex &index, int role) const { if (!index.isValid()) { return QVariant(); } CCombinedBan *rec = static_cast(index.internalPointer()); if (role == Qt::DisplayRole) { switch (index.column()) { case Address: return QString::fromStdString(rec->subnet.ToString()); case Bantime: QDateTime date = QDateTime::fromMSecsSinceEpoch(0); date = date.addSecs(rec->banEntry.nBanUntil); return date.toString(Qt::SystemLocaleLongDate); } } return QVariant(); } QVariant BanTableModel::headerData(int section, Qt::Orientation orientation, int role) const { if (orientation == Qt::Horizontal) { if (role == Qt::DisplayRole && section < columns.size()) { return columns[section]; } } return QVariant(); } Qt::ItemFlags BanTableModel::flags(const QModelIndex &index) const { if (!index.isValid()) { return Qt::NoItemFlags; } Qt::ItemFlags retval = Qt::ItemIsSelectable | Qt::ItemIsEnabled; return retval; } QModelIndex BanTableModel::index(int row, int column, const QModelIndex &parent) const { Q_UNUSED(parent); CCombinedBan *data = priv->index(row); if (data) { return createIndex(row, column, data); } return QModelIndex(); } void BanTableModel::refresh() { Q_EMIT layoutAboutToBeChanged(); priv->refreshBanlist(m_node); Q_EMIT layoutChanged(); } void BanTableModel::sort(int column, Qt::SortOrder order) { priv->sortColumn = column; priv->sortOrder = order; refresh(); } bool 
BanTableModel::shouldShow() { return priv->size() > 0; } diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index 8ad013675..ed7f3e1e6 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -1,230 +1,230 @@ // Copyright (c) 2011-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include -#include +#include #include #include #include bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombinedStats &right) const { const CNodeStats *pLeft = &(left.nodeStats); const CNodeStats *pRight = &(right.nodeStats); if (order == Qt::DescendingOrder) { std::swap(pLeft, pRight); } switch (column) { case PeerTableModel::NetNodeId: return pLeft->nodeid < pRight->nodeid; case PeerTableModel::Address: return pLeft->addrName.compare(pRight->addrName) < 0; case PeerTableModel::Subversion: return pLeft->cleanSubVer.compare(pRight->cleanSubVer) < 0; case PeerTableModel::Ping: return pLeft->m_min_ping_usec < pRight->m_min_ping_usec; case PeerTableModel::Sent: return pLeft->nSendBytes < pRight->nSendBytes; case PeerTableModel::Received: return pLeft->nRecvBytes < pRight->nRecvBytes; } return false; } // private implementation class PeerTablePriv { public: /** Local cache of peer information */ QList cachedNodeStats; /** Column to sort nodes by (default to unsorted) */ int sortColumn{-1}; /** Order (ascending or descending) to sort nodes by */ Qt::SortOrder sortOrder; /** Index of rows by node ID */ std::map mapNodeRows; /** Pull a full list of peers from vNodes into our cache */ void refreshPeers(interfaces::Node &node) { { cachedNodeStats.clear(); interfaces::Node::NodesStats nodes_stats; node.getNodesStats(nodes_stats); cachedNodeStats.reserve(nodes_stats.size()); for (const auto &node_stats : nodes_stats) { CNodeCombinedStats stats; stats.nodeStats = std::get<0>(node_stats); stats.fNodeStateStatsAvailable = std::get<1>(node_stats); stats.nodeStateStats = std::get<2>(node_stats); cachedNodeStats.append(stats); } } if (sortColumn >= 0) { // sort cacheNodeStats (use stable sort to prevent rows jumping // around unnecessarily) std::stable_sort(cachedNodeStats.begin(), cachedNodeStats.end(), NodeLessThan(sortColumn, sortOrder)); } // build index map mapNodeRows.clear(); int row = 0; for (const CNodeCombinedStats &stats : cachedNodeStats) { mapNodeRows.insert( std::pair(stats.nodeStats.nodeid, row++)); } } int size() const { return cachedNodeStats.size(); } CNodeCombinedStats *index(int idx) { if (idx >= 0 && idx < cachedNodeStats.size()) { return &cachedNodeStats[idx]; } return nullptr; } }; PeerTableModel::PeerTableModel(interfaces::Node &node, ClientModel *parent) : QAbstractTableModel(parent), m_node(node), clientModel(parent), timer(nullptr) { columns << tr("NodeId") << tr("Node/Service") << tr("Ping") << tr("Sent") << tr("Received") << tr("User Agent"); priv.reset(new PeerTablePriv()); // set up timer for auto refresh timer = new QTimer(this); connect(timer, &QTimer::timeout, this, &PeerTableModel::refresh); timer->setInterval(MODEL_UPDATE_DELAY); // load initial data refresh(); } PeerTableModel::~PeerTableModel() { // Intentionally left empty } void PeerTableModel::startAutoRefresh() { timer->start(); } void PeerTableModel::stopAutoRefresh() { timer->stop(); } int PeerTableModel::rowCount(const QModelIndex &parent) const { Q_UNUSED(parent); return priv->size(); } int PeerTableModel::columnCount(const 
QModelIndex &parent) const { Q_UNUSED(parent); return columns.length(); } QVariant PeerTableModel::data(const QModelIndex &index, int role) const { if (!index.isValid()) { return QVariant(); } CNodeCombinedStats *rec = static_cast(index.internalPointer()); if (role == Qt::DisplayRole) { switch (index.column()) { case NetNodeId: return (qint64)rec->nodeStats.nodeid; case Address: return QString::fromStdString(rec->nodeStats.addrName); case Subversion: return QString::fromStdString(rec->nodeStats.cleanSubVer); case Ping: return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_usec); case Sent: return GUIUtil::formatBytes(rec->nodeStats.nSendBytes); case Received: return GUIUtil::formatBytes(rec->nodeStats.nRecvBytes); } } else if (role == Qt::TextAlignmentRole) { switch (index.column()) { case Ping: case Sent: case Received: return QVariant(Qt::AlignRight | Qt::AlignVCenter); default: return QVariant(); } } return QVariant(); } QVariant PeerTableModel::headerData(int section, Qt::Orientation orientation, int role) const { if (orientation == Qt::Horizontal) { if (role == Qt::DisplayRole && section < columns.size()) { return columns[section]; } } return QVariant(); } Qt::ItemFlags PeerTableModel::flags(const QModelIndex &index) const { if (!index.isValid()) { return Qt::NoItemFlags; } Qt::ItemFlags retval = Qt::ItemIsSelectable | Qt::ItemIsEnabled; return retval; } QModelIndex PeerTableModel::index(int row, int column, const QModelIndex &parent) const { Q_UNUSED(parent); CNodeCombinedStats *data = priv->index(row); if (data) { return createIndex(row, column, data); } return QModelIndex(); } const CNodeCombinedStats *PeerTableModel::getNodeStats(int idx) { return priv->index(idx); } void PeerTableModel::refresh() { Q_EMIT layoutAboutToBeChanged(); priv->refreshPeers(m_node); Q_EMIT layoutChanged(); } int PeerTableModel::getRowByNodeId(NodeId nodeid) { std::map::iterator it = priv->mapNodeRows.find(nodeid); if (it == priv->mapNodeRows.end()) { return -1; } return it->second; } void PeerTableModel::sort(int column, Qt::SortOrder order) { priv->sortColumn = column; priv->sortOrder = order; refresh(); } diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp index e439e5743..0db0b5fa1 100644 --- a/src/test/checkqueue_tests.cpp +++ b/src/test/checkqueue_tests.cpp @@ -1,421 +1,422 @@ // Copyright (c) 2012-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include #include #include #include #include #include #include #include #include #include #include #include #include +#include BOOST_FIXTURE_TEST_SUITE(checkqueue_tests, TestingSetup) static const unsigned int QUEUE_BATCH_SIZE = 128; static const int SCRIPT_CHECK_THREADS = 3; struct FakeCheck { bool operator()() { return true; } void swap(FakeCheck &x){}; }; struct FakeCheckCheckCompletion { static std::atomic n_calls; bool operator()() { n_calls.fetch_add(1, std::memory_order_relaxed); return true; } void swap(FakeCheckCheckCompletion &x){}; }; struct FailingCheck { bool fails; FailingCheck(bool _fails) : fails(_fails){}; FailingCheck() : fails(true){}; bool operator()() { return !fails; } void swap(FailingCheck &x) { std::swap(fails, x.fails); }; }; struct UniqueCheck { static Mutex m; static std::unordered_multiset results GUARDED_BY(m); size_t check_id; UniqueCheck(size_t check_id_in) : check_id(check_id_in){}; UniqueCheck() : check_id(0){}; bool operator()() { LOCK(m); results.insert(check_id); return true; } void swap(UniqueCheck &x) { std::swap(x.check_id, check_id); }; }; struct MemoryCheck { static std::atomic fake_allocated_memory; bool b{false}; bool operator()() { return true; } MemoryCheck(){}; MemoryCheck(const MemoryCheck &x) { // We have to do this to make sure that destructor calls are paired // // Really, copy constructor should be deletable, but CCheckQueue breaks // if it is deleted because of internal push_back. fake_allocated_memory.fetch_add(b, std::memory_order_relaxed); }; MemoryCheck(bool b_) : b(b_) { fake_allocated_memory.fetch_add(b, std::memory_order_relaxed); }; ~MemoryCheck() { fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed); }; void swap(MemoryCheck &x) { std::swap(b, x.b); }; }; struct FrozenCleanupCheck { static std::atomic nFrozen; static std::condition_variable cv; static std::mutex m; // Freezing can't be the default initialized behavior given how the queue // swaps in default initialized Checks. bool should_freeze{false}; bool operator()() { return true; } FrozenCleanupCheck() {} ~FrozenCleanupCheck() { if (should_freeze) { std::unique_lock l(m); nFrozen.store(1, std::memory_order_relaxed); cv.notify_one(); cv.wait( l, [] { return nFrozen.load(std::memory_order_relaxed) == 0; }); } } void swap(FrozenCleanupCheck &x) { std::swap(should_freeze, x.should_freeze); }; }; // Static Allocations std::mutex FrozenCleanupCheck::m{}; std::atomic FrozenCleanupCheck::nFrozen{0}; std::condition_variable FrozenCleanupCheck::cv{}; Mutex UniqueCheck::m; std::unordered_multiset UniqueCheck::results; std::atomic FakeCheckCheckCompletion::n_calls{0}; std::atomic MemoryCheck::fake_allocated_memory{0}; // Queue Typedefs typedef CCheckQueue Correct_Queue; typedef CCheckQueue Standard_Queue; typedef CCheckQueue Failing_Queue; typedef CCheckQueue Unique_Queue; typedef CCheckQueue Memory_Queue; typedef CCheckQueue FrozenCleanup_Queue; /** This test case checks that the CCheckQueue works properly * with each specified size_t Checks pushed. */ static void Correct_Queue_range(std::vector range) { auto small_queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { small_queue->Thread(); }); } // Make vChecks here to save on malloc (this test can be slow...) 
std::vector vChecks; for (const size_t i : range) { size_t total = i; FakeCheckCheckCompletion::n_calls = 0; CCheckQueueControl control(small_queue.get()); while (total) { vChecks.resize(std::min(total, (size_t)InsecureRandRange(10))); total -= vChecks.size(); control.Add(vChecks); } BOOST_REQUIRE(control.Wait()); if (FakeCheckCheckCompletion::n_calls != i) { BOOST_REQUIRE_EQUAL(FakeCheckCheckCompletion::n_calls, i); } } tg.interrupt_all(); tg.join_all(); } /** Test that 0 checks is correct */ BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Zero) { std::vector range; range.push_back((size_t)0); Correct_Queue_range(range); } /** Test that 1 check is correct */ BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_One) { std::vector range; range.push_back((size_t)1); Correct_Queue_range(range); } /** Test that MAX check is correct */ BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Max) { std::vector range; range.push_back(100000); Correct_Queue_range(range); } /** Test that random numbers of checks are correct */ BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Random) { std::vector range; range.reserve(100000 / 1000); for (size_t i = 2; i < 100000; i += std::max((size_t)1, (size_t)InsecureRandRange(std::min( (size_t)1000, ((size_t)100000) - i)))) { range.push_back(i); } Correct_Queue_range(range); } /** Test that failing checks are caught */ BOOST_AUTO_TEST_CASE(test_CheckQueue_Catches_Failure) { auto fail_queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { fail_queue->Thread(); }); } for (size_t i = 0; i < 1001; ++i) { CCheckQueueControl control(fail_queue.get()); size_t remaining = i; while (remaining) { size_t r = InsecureRandRange(10); std::vector vChecks; vChecks.reserve(r); for (size_t k = 0; k < r && remaining; k++, remaining--) { vChecks.emplace_back(remaining == 1); } control.Add(vChecks); } bool success = control.Wait(); if (i > 0) { BOOST_REQUIRE(!success); } else if (i == 0) { BOOST_REQUIRE(success); } } tg.interrupt_all(); tg.join_all(); } // Test that a block validation which fails does not interfere with // future blocks, ie, the bad state is cleared. BOOST_AUTO_TEST_CASE(test_CheckQueue_Recovers_From_Failure) { auto fail_queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { fail_queue->Thread(); }); } for (auto times = 0; times < 10; ++times) { for (const bool end_fails : {true, false}) { CCheckQueueControl control(fail_queue.get()); { std::vector vChecks; vChecks.resize(100, false); vChecks[99] = end_fails; control.Add(vChecks); } bool r = control.Wait(); BOOST_REQUIRE(r != end_fails); } } tg.interrupt_all(); tg.join_all(); } // Test that unique checks are actually all called individually, rather than // just one check being called repeatedly. 
Test that checks are not called // more than once as well BOOST_AUTO_TEST_CASE(test_CheckQueue_UniqueCheck) { auto queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { queue->Thread(); }); } size_t COUNT = 100000; size_t total = COUNT; { CCheckQueueControl control(queue.get()); while (total) { size_t r = InsecureRandRange(10); std::vector vChecks; for (size_t k = 0; k < r && total; k++) { vChecks.emplace_back(--total); } control.Add(vChecks); } } { LOCK(UniqueCheck::m); bool r = true; BOOST_REQUIRE_EQUAL(UniqueCheck::results.size(), COUNT); for (size_t i = 0; i < COUNT; ++i) { r = r && UniqueCheck::results.count(i) == 1; } BOOST_REQUIRE(r); } tg.interrupt_all(); tg.join_all(); } // Test that blocks which might allocate lots of memory free their memory // aggressively. // // This test attempts to catch a pathological case where by lazily freeing // checks might mean leaving a check un-swapped out, and decreasing by 1 each // time could leave the data hanging across a sequence of blocks. BOOST_AUTO_TEST_CASE(test_CheckQueue_Memory) { auto queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { queue->Thread(); }); } for (size_t i = 0; i < 1000; ++i) { size_t total = i; { CCheckQueueControl control(queue.get()); while (total) { size_t r = InsecureRandRange(10); std::vector vChecks; for (size_t k = 0; k < r && total; k++) { total--; // Each iteration leaves data at the front, back, and middle // to catch any sort of deallocation failure vChecks.emplace_back(total == 0 || total == i || total == i / 2); } control.Add(vChecks); } } BOOST_REQUIRE_EQUAL(MemoryCheck::fake_allocated_memory, 0U); } tg.interrupt_all(); tg.join_all(); } // Test that a new verification cannot occur until all checks // have been destructed BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup) { auto queue = std::make_unique(QUEUE_BATCH_SIZE); boost::thread_group tg; bool fails = false; for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) { tg.create_thread([&] { queue->Thread(); }); } std::thread t0([&]() { CCheckQueueControl control(queue.get()); std::vector vChecks(1); // Freezing can't be the default initialized behavior given how the // queue // swaps in default initialized Checks (otherwise freezing destructor // would get called twice). 
        vChecks[0].should_freeze = true;
        control.Add(vChecks);
        // Hangs here
        bool waitResult = control.Wait();
        assert(waitResult);
    });
    {
        std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
        // Wait until the queue has finished all jobs and frozen
        FrozenCleanupCheck::cv.wait(
            l, []() { return FrozenCleanupCheck::nFrozen == 1; });
    }
    // Try to get control of the queue a bunch of times
    for (auto x = 0; x < 100 && !fails; ++x) {
        fails = queue->ControlMutex.try_lock();
    }
    {
        // Unfreeze (we need lock in case of spurious wakeup)
        std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
        FrozenCleanupCheck::nFrozen = 0;
    }
    // Awaken frozen destructor
    FrozenCleanupCheck::cv.notify_one();
    // Wait for control to finish
    t0.join();
    tg.interrupt_all();
    tg.join_all();
    BOOST_REQUIRE(!fails);
}

/** Test that CCheckQueueControl is threadsafe */
BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) {
    auto queue = std::make_unique(QUEUE_BATCH_SIZE);
    {
        boost::thread_group tg;
        std::atomic nThreads{0};
        std::atomic fails{0};
        for (size_t i = 0; i < 3; ++i) {
            tg.create_thread([&] {
                CCheckQueueControl control(queue.get());
                // While sleeping, no other thread should execute to this point
                auto observed = ++nThreads;
                UninterruptibleSleep(std::chrono::milliseconds{10});
                fails += observed != nThreads;
            });
        }
        tg.join_all();
        BOOST_REQUIRE_EQUAL(fails, 0);
    }
    {
        boost::thread_group tg;
        std::mutex m;
        std::condition_variable cv;
        bool has_lock{false};
        bool has_tried{false};
        bool done{false};
        bool done_ack{false};
        {
            std::unique_lock<std::mutex> l(m);
            tg.create_thread([&] {
                CCheckQueueControl control(queue.get());
                std::unique_lock<std::mutex> ll(m);
                has_lock = true;
                cv.notify_one();
                cv.wait(ll, [&] { return has_tried; });
                done = true;
                cv.notify_one();
                // Wait until the done is acknowledged
                //
                cv.wait(ll, [&] { return done_ack; });
            });
            // Wait for thread to get the lock
            cv.wait(l, [&]() { return has_lock; });
            bool fails = false;
            for (auto x = 0; x < 100 && !fails; ++x) {
                fails = queue->ControlMutex.try_lock();
            }
            has_tried = true;
            cv.notify_one();
            cv.wait(l, [&]() { return done; });
            // Acknowledge the done
            done_ack = true;
            cv.notify_one();
            BOOST_REQUIRE(!fails);
        }
        tg.join_all();
    }
}

BOOST_AUTO_TEST_SUITE_END()

diff --git a/src/validation.h b/src/validation.h
index 6be8f5a62..1b75dc3dd 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,1120 +1,1119 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2019 The Bitcoin Core developers
// Copyright (c) 2017-2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_VALIDATION_H
#define BITCOIN_VALIDATION_H

#if defined(HAVE_CONFIG_H)
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include // For CMessageHeader::MessageMagic
#include
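Stepping back from the raw diff for a moment: the pattern that the checkqueue tests earlier keep exercising — start worker threads, take a CCheckQueueControl, Add() batches of checks, then Wait() for the combined verdict — looks like this in isolation (an editor's sketch modeled on Correct_Queue_range above, not part of this diff; FakeCheck is the trivial always-true check from the test file):

    #include <checkqueue.h>

    #include <boost/thread.hpp>

    #include <cassert>
    #include <vector>

    struct FakeCheck {
        bool operator()() { return true; } // a check that always succeeds
        void swap(FakeCheck &x) {}
    };

    int main() {
        CCheckQueue<FakeCheck> queue(128 /* batch size */);
        boost::thread_group tg;
        for (int i = 0; i < 3; ++i) {
            tg.create_thread([&] { queue.Thread(); }); // worker loop
        }
        {
            // The control object serializes users of the queue for one "block".
            CCheckQueueControl<FakeCheck> control(&queue);
            std::vector<FakeCheck> checks(100);
            control.Add(checks);    // checks are swapped into the queue
            assert(control.Wait()); // true iff every check returned true
        }
        tg.interrupt_all();
        tg.join_all();
    }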