diff --git a/src/cuckoocache.h b/src/cuckoocache.h
index 9bdf1d194..8ce1800dc 100644
--- a/src/cuckoocache.h
+++ b/src/cuckoocache.h
@@ -1,576 +1,577 @@
 // Copyright (c) 2016 Jeremy Rubin
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_CUCKOOCACHE_H
 #define BITCOIN_CUCKOOCACHE_H
 
-#include <algorithm>
+#include <algorithm> // std::find
 #include <array>
 #include <atomic>
 #include <cmath>
 #include <cstring>
 #include <memory>
+#include <utility>
 #include <vector>
 
 /**
  * High-performance cache primitives.
  *
  * Summary:
  *
  * 1. @ref bit_packed_atomic_flags is bit-packed atomic flags for garbage
  * collection
  *
  * 2. @ref cache is a cache which is performant in memory usage and lookup
  * speed. It is lockfree for erase operations. Elements are lazily erased on the
  * next insert.
  */
 namespace CuckooCache {
 /**
  * @ref bit_packed_atomic_flags implements a container for garbage collection
  * flags that is only thread unsafe on calls to setup. This class bit-packs
  * collection flags for memory efficiency.
  *
  * All operations are `std::memory_order_relaxed` so external mechanisms must
  * ensure that writes and reads are properly synchronized.
  *
  * On setup(n), all bits up to `n` are marked as collected.
  *
  * Under the hood, because it is an 8-bit type, it makes sense to use a multiple
  * of 8 for setup, but it will be safe if that is not the case as well.
  */
 class bit_packed_atomic_flags {
     std::unique_ptr<std::atomic<uint8_t>[]> mem;
 
 public:
     /** No default constructor, as there must be some size. */
     bit_packed_atomic_flags() = delete;
 
     /**
      * bit_packed_atomic_flags constructor creates memory to sufficiently
      * keep track of garbage collection information for `size` entries.
      *
      * @param size the number of elements to allocate space for
      *
      * @post bit_set, bit_unset, and bit_is_set function properly forall x. x <
      * size
      * @post All calls to bit_is_set (without subsequent bit_unset) will return
      * true.
      */
     explicit bit_packed_atomic_flags(uint32_t size) {
         // pad out the size if needed
         size = (size + 7) / 8;
         mem.reset(new std::atomic<uint8_t>[size]);
         for (uint32_t i = 0; i < size; ++i) {
             mem[i].store(0xFF);
         }
     };
 
     /**
      * setup marks all entries and ensures that bit_packed_atomic_flags can
      * store at least `b` entries.
      *
      * @param b the number of elements to allocate space for
      * @post bit_set, bit_unset, and bit_is_set function properly forall x. x <
      * b
      * @post All calls to bit_is_set (without subsequent bit_unset) will return
      * true.
      */
     inline void setup(uint32_t b) {
         bit_packed_atomic_flags d(b);
         std::swap(mem, d.mem);
     }
 
     /**
      * bit_set sets an entry as discardable.
      *
      * @param s the index of the entry to bit_set
      * @post immediately subsequent call (assuming proper external memory
      * ordering) to bit_is_set(s) == true.
      */
     inline void bit_set(uint32_t s) {
         mem[s >> 3].fetch_or(1 << (s & 7), std::memory_order_relaxed);
     }
 
     /**
      * bit_unset marks an entry as something that should not be overwritten.
      *
      * @param s the index of the entry to bit_unset
      * @post immediately subsequent call (assuming proper external memory
      * ordering) to bit_is_set(s) == false.
      */
     inline void bit_unset(uint32_t s) {
         mem[s >> 3].fetch_and(~(1 << (s & 7)), std::memory_order_relaxed);
     }
 
     /**
      * bit_is_set queries the table for discardability at `s`.
      *
      * @param s the index of the entry to read
      * @returns true if the bit at index `s` was set, false otherwise
      * */
     inline bool bit_is_set(uint32_t s) const {
         return (1 << (s & 7)) & mem[s >> 3].load(std::memory_order_relaxed);
     }
 };
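 
 // Worked example of the bit packing above: for an index s = 13, the flag
 // lives in byte s >> 3 == 1 under the mask 1 << (s & 7) == 1 << 5 == 0x20, so
 // bit_set(13) ORs 0x20 into mem[1], bit_unset(13) ANDs it back out, and
 // bit_is_set(13) tests that same bit.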
 
 /**
  * @ref cache implements a cache with properties similar to a cuckoo-set.
  *
  *  The cache is able to hold up to `(~(uint32_t)0) - 1` elements.
  *
  *  Read Operations:
  *      - contains() for `erase=false`
  *      - get() for `erase=false`
  *
  *  Read+Erase Operations:
  *      - contains() for `erase=true`
  *      - get() for `erase=true`
  *
  *  Erase Operations:
  *      - allow_erase()
  *
  *  Write Operations:
  *      - setup()
  *      - setup_bytes()
  *      - insert()
  *      - please_keep()
  *
  *  Synchronization Free Operations:
  *      - invalid()
  *      - compute_hashes()
  *
  * User Must Guarantee:
  *
  * 1. Write requires synchronized access (e.g. a lock)
  * 2. Read requires no concurrent Write, synchronized with last insert.
  * 3. Erase requires no concurrent Write, synchronized with last insert.
  * 4. An Erase caller must release all memory before allowing a new Writer.
  *
  *
  * Note on function names:
  *   - The name "allow_erase" is used because the real discard happens later.
  *   - The name "please_keep" is used because elements may be erased anyways on
  * insert.
  *
  * @tparam Element should be a movable and copyable type
  * @tparam Hash should be a function/callable which takes a template parameter
  * hash_select and an Element and extracts a hash from it. Should return
  * high-entropy uint32_t hashes for `Hash h; h<0>(e) ... h<7>(e)`.
  */
 template <typename Element, typename Hash> class cache {
 private:
     /** table stores all the elements */
     std::vector<Element> table;
 
     /** size stores the total available slots in the hash table */
     uint32_t size;
 
     /**
      * The bit_packed_atomic_flags array is marked mutable because we want
      * garbage collection to be allowed to occur from const methods.
      */
     mutable bit_packed_atomic_flags collection_flags;
 
     /**
      * epoch_flags tracks how recently an element was inserted into the cache.
      * true denotes recent, false denotes not-recent. See insert() method for
      * full semantics.
      */
     mutable std::vector<bool> epoch_flags;
 
     /**
      * epoch_heuristic_counter is used to determine when an epoch might be aged
      * & an expensive scan should be done. epoch_heuristic_counter is
      * decremented on insert and reset to the new number of inserts which would
      * cause the epoch to reach epoch_size when it reaches zero.
      */
     uint32_t epoch_heuristic_counter;
 
     /**
      * epoch_size is set to be the number of elements supposed to be in an epoch.
      * When the number of non-erased elements in an epoch exceeds epoch_size, a
      * new epoch should be started and all current entries demoted. epoch_size
      * is set to be 45% of size because we want to keep load around 90%, and we
      * support 3 epochs at once -- one "dead" which has been erased, one "dying"
      * which has been marked to be erased next, and one "living" which new
      * inserts add to.
      */
     uint32_t epoch_size;
 
     /**
      * depth_limit determines how many elements insert should try to replace.
      * Should be set to log2(n).
      */
     uint8_t depth_limit;
 
     /**
      * hash_function is a const instance of the hash function. It cannot be
      * static or initialized at call time as it may have internal state (such as
      * a nonce).
      */
     const Hash hash_function;
 
     /**
      * Key is the key type for this map or set.
      */
     using Key = typename Element::KeyType;
 
     /**
      * compute_hashes is a convenience for not having to write out this
      * expression everywhere we use the hash values of an Element.
      *
      * We need to map the 32-bit input hash onto a hash bucket in a range [0,
      * size) in a manner which preserves as much of the hash's uniformity as
      * possible. Ideally this would be done by bitmasking but the size is
      * usually not a power of two.
      *
      * The naive approach would be to use a mod -- which isn't perfectly uniform
      * but so long as the hash is much larger than size it is not that bad.
      * Unfortunately, mod/division is fairly slow on ordinary microprocessors
      * (e.g. 90-ish cycles on Haswell; ARM doesn't even have an instruction for
      * it); when the divisor is a constant the compiler will do clever tricks
      * to turn it into a multiply+add+shift, but size is a run-time value so the
      * compiler can't do that here.
      *
      * One option would be to implement the same trick the compiler uses and
      * compute the constants for exact division based on the size, as described
      * in "{N}-bit Unsigned Division via {N}-bit Multiply-Add" by Arch D.
      * Robison in 2005. But that code is somewhat complicated and the result is
      * still slower than other options:
      *
      * Instead we treat the 32-bit random number as a Q32 fixed-point number in
      * the range [0, 1) and simply multiply it by the size. Then we just shift
      * the result down by 32 bits to get our bucket number. The result has
      * non-uniformity the same as a mod, but it is much faster to compute. More
      * about this technique can be found at
      *  http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
      * .
      *
      * The resulting non-uniformity is also more equally distributed which would
      * be advantageous for something like linear probing, though it shouldn't
      * matter one way or the other for a cuckoo table.
      *
      * The primary disadvantage of this approach is that it requires increased
      * intermediate precision, but for a 32-bit random number we only need the
      * high 32 bits of a 32*32->64 multiply, which means the operation is
      * reasonably fast even on a typical 32-bit processor.
      *
      * @param k The key whose hashes will be returned
      * @returns Deterministic hashes derived from `k` uniformly mapped onto the
      * range [0, size)
      */
     inline std::array<uint32_t, 8> compute_hashes(const Key &k) const {
         return {{uint32_t(uint64_t(hash_function.template operator()<0>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<1>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<2>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<3>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<4>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<5>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<6>(k)) *
                               uint64_t(size) >>
                           32),
                  uint32_t(uint64_t(hash_function.template operator()<7>(k)) *
                               uint64_t(size) >>
                           32)}};
     }
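 
     // Worked example of the multiply-shift reduction above: with size = 1000
     // and a hash h = 0x80000000 (i.e. one half as a Q32 fixed-point value),
     // (uint64_t(h) * uint64_t(size)) >> 32 == 500; h = 0 maps to bucket 0 and
     // h = 0xFFFFFFFF maps to bucket 999, so the whole range [0, size) is
     // covered without a modulo.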
 
     /**
      * invalid returns a special index that can never be inserted to
      * @returns the special constexpr index that can never be inserted to
      */
     constexpr uint32_t invalid() const { return ~uint32_t(0); }
 
     /**
      * allow_erase marks the element at index `n` as discardable. Threadsafe
      * without any concurrent insert.
      * @param n the index to allow erasure of
      */
     inline void allow_erase(uint32_t n) const { collection_flags.bit_set(n); }
 
     /**
      * please_keep marks the element at index `n` as an entry that should be
      * kept. Threadsafe without any concurrent insert.
      * @param n the index to prioritize keeping
      */
     inline void please_keep(uint32_t n) const { collection_flags.bit_unset(n); }
 
     /**
      * epoch_check handles the changing of epochs for elements stored in the
      * cache. epoch_check should be run before every insert.
      *
      * First, epoch_check decrements and checks the cheap heuristic, and then
      * does a more expensive scan if the cheap heuristic runs out. If the
      * expensive scan succeeds, the epochs are aged and old elements are
      * allow_erased. The cheap heuristic is reset to retrigger after the worst
      * case growth of the current epoch's elements would exceed the epoch_size.
      */
     void epoch_check() {
         if (epoch_heuristic_counter != 0) {
             --epoch_heuristic_counter;
             return;
         }
         // count the number of elements from the latest epoch which have not
         // been erased.
         uint32_t epoch_unused_count = 0;
         for (uint32_t i = 0; i < size; ++i) {
             epoch_unused_count +=
                 epoch_flags[i] && !collection_flags.bit_is_set(i);
         }
         // If there are more non-deleted entries in the current epoch than the
         // epoch size, then allow_erase on all elements in the old epoch (marked
         // false) and move all elements in the current epoch to the old epoch
         // but do not call allow_erase on their indices.
         if (epoch_unused_count >= epoch_size) {
             for (uint32_t i = 0; i < size; ++i) {
                 if (epoch_flags[i]) {
                     epoch_flags[i] = false;
                 } else {
                     allow_erase(i);
                 }
             }
             epoch_heuristic_counter = epoch_size;
         } else {
             // reset the epoch_heuristic_counter to next do a scan when worst
             // case behavior (no intermittent erases) would exceed epoch size,
             // with a reasonable minimum scan size. Ordinarily, we would have to
             // sanity check std::min(epoch_size, epoch_unused_count), but we
             // already know that `epoch_unused_count < epoch_size` in this
             // branch
             epoch_heuristic_counter = std::max(
                 1u, std::max(epoch_size / 16, epoch_size - epoch_unused_count));
         }
     }
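 
     // Worked example of the reset heuristic: after setup(100), epoch_size is
     // 45; if a scan finds epoch_unused_count = 30 (< 45), the counter is
     // reset to max(1, max(45 / 16, 45 - 30)) == 15, so the next scan happens
     // only after at least 15 more inserts.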
 
 public:
     /**
      * You must always construct a cache with some elements via a subsequent
      * call to setup or setup_bytes, otherwise operations may segfault.
      */
     cache()
         : table(), size(), collection_flags(0), epoch_flags(),
           epoch_heuristic_counter(), epoch_size(), depth_limit(0),
           hash_function() {}
 
     /**
      * setup initializes the container to store no more than new_size
      * elements.
      *
      * setup should only be called once.
      *
      * @param new_size the desired number of elements to store
      * @returns the maximum number of elements storable
      */
     uint32_t setup(uint32_t new_size) {
         // depth_limit must be at least one otherwise errors can occur.
         depth_limit = static_cast<uint8_t>(
             std::log2(static_cast<float>(std::max((uint32_t)2, new_size))));
         size = std::max<uint32_t>(2, new_size);
         table.resize(size);
         collection_flags.setup(size);
         epoch_flags.resize(size);
         // Set to 45% as described above
         epoch_size = std::max((uint32_t)1, (45 * size) / 100);
         // Initially set to wait for a whole epoch
         epoch_heuristic_counter = epoch_size;
         return size;
     }
 
     /**
      * setup_bytes is a convenience function which accounts for internal memory
      * usage when deciding how many elements to store. It isn't perfect because
      * it doesn't account for any overhead (struct size, MallocUsage, collection
      * and epoch flags). This was done to simplify selecting a power of two
      * size. In the expected use case, an extra two bits per entry should be
      * negligible compared to the size of the elements.
      *
      * @param bytes the approximate number of bytes to use for this data
      * structure
      * @returns the maximum number of elements storable (see setup()
      * documentation for more detail)
      */
     uint32_t setup_bytes(size_t bytes) {
         return setup(bytes / sizeof(Element));
     }
 
     /**
      * insert loops at most depth_limit times trying to insert a hash at various
      * locations in the table via a variant of the Cuckoo Algorithm with eight
      * hash locations.
      *
      * It drops the last tried element if it runs out of depth before
      * encountering an open slot.
      *
      * Thus:
      *
      * ```
      * insert(x);
      * return contains(x, false);
      * ```
      *
      * is not guaranteed to return true.
      *
      * @param e the element to insert
      * @param replace whether to replace if an existing element with the same
      * key is found.
      * @post one of the following: all previously inserted elements and e are
      * now in the table; one previously inserted element is evicted from the
      * table; or the entry attempted to be inserted is evicted. If replace is
      * true and a matching element already exists, it is updated accordingly.
      */
     inline void insert(Element e, bool replace = false) {
         epoch_check();
         uint32_t last_loc = invalid();
         bool last_epoch = true;
         std::array<uint32_t, 8> locs = compute_hashes(e.getKey());
         // Make sure we have not already inserted this element.
         // If we have, make sure that it does not get deleted.
         for (const uint32_t loc : locs) {
             if (table[loc].getKey() == e.getKey()) {
                 if (replace) {
                     table[loc] = std::move(e);
                 }
                 please_keep(loc);
                 epoch_flags[loc] = last_epoch;
                 return;
             }
         }
         for (uint8_t depth = 0; depth < depth_limit; ++depth) {
             // First try to insert to an empty slot, if one exists
             for (const uint32_t loc : locs) {
                 if (!collection_flags.bit_is_set(loc)) {
                     continue;
                 }
                 table[loc] = std::move(e);
                 please_keep(loc);
                 epoch_flags[loc] = last_epoch;
                 return;
             }
             /**
              * Swap with the element at the location that was not the last one
              * looked at. Example:
              *
              * 1. On the first iteration, last_loc == invalid(), so find
              * returns last (an offset of 8) and last_loc becomes locs[1].
              * 2. On further iterations, where last_loc == locs[k], last_loc
              * will go to locs[(k + 1) % 8], i.e., the next of the 8 indices,
              * wrapping around to 0 if needed.
              *
              * This prevents moving the element we just put in.
              *
              * The swap is not a move -- we must switch onto the evicted element
              * for the next iteration.
              */
             last_loc =
                 locs[(1 + (std::find(locs.begin(), locs.end(), last_loc) -
                            locs.begin())) &
                      7];
             std::swap(table[last_loc], e);
             // Can't std::swap a std::vector<bool>::reference and a bool&.
             bool epoch = last_epoch;
             last_epoch = epoch_flags[last_loc];
             epoch_flags[last_loc] = epoch;
 
             // Recompute the locs -- unfortunately happens one too many times!
             locs = compute_hashes(e.getKey());
         }
     }
 
     /**
      * contains iterates through the hash locations for a given element and
      * checks to see if it is present.
      *
      * contains does not check garbage collected state (in other words, garbage
      * is only collected when the space is needed), so:
      *
      * ```
      * insert(x);
      * if (contains(x, true))
      *     return contains(x, false);
      * else
      *     return true;
      * ```
      *
      * executed on a single thread will always return true!
      *
      * This is a great property for re-org performance for example.
      *
      * contains returns a bool set true if the element was found.
      *
      * @param k the key to check
      * @param erase whether to attempt setting the garbage collect flag
      *
      * @post if erase is true and the element is found, then the garbage collect
      * flag is set
      * @returns true if the element is found, false otherwise
      */
     bool contains(const Key &k, const bool erase) const {
         return find(k, erase) != nullptr;
     }
 
     /**
      * get is almost identical to contains(), with the difference that it
      * obtains the found element (for Elements that contain key and value,
      * this has the effect of obtaining the found value).
      *
      * @param e the element to check
      * @param erase whether to attempt setting the garbage collect flag
      *
      * @post If the element is found, it is copied into e. If erase is true
      * and the element is found, then the garbage collect flag is set.
      * @returns true if the element is found, false otherwise
      */
     bool get(Element &e, const bool erase) const {
         if (const Element *eptr = find(e.getKey(), erase)) {
             e = *eptr;
             return true;
         }
 
         return false;
     }
 
 private:
     const Element *find(const Key &k, const bool erase) const {
         std::array<uint32_t, 8> locs = compute_hashes(k);
         for (const uint32_t loc : locs) {
             if (table[loc].getKey() == k) {
                 if (erase) {
                     allow_erase(loc);
                 }
                 return &table[loc];
             }
         }
         return nullptr;
     }
 };
 
 /**
  * Helper class used when we only want the cache to be a set rather than a map.
  */
 template <typename T> struct KeyOnly : public T {
     // For contains.
     using KeyType = T;
 
     // Ensure implicit conversion from T.
     KeyOnly() = default;
     KeyOnly(const T &x) : T(x) {}
 
     // Implement required features.
     const T &getKey() const { return *this; }
 };
 
 } // namespace CuckooCache
 
 #endif // BITCOIN_CUCKOOCACHE_H
diff --git a/src/prevector.h b/src/prevector.h
index 66c1d2878..b2a747a21 100644
--- a/src/prevector.h
+++ b/src/prevector.h
@@ -1,594 +1,595 @@
 // Copyright (c) 2015-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_PREVECTOR_H
 #define BITCOIN_PREVECTOR_H
 
 #include <algorithm>
 #include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <cstdlib>
 #include <cstring>
 #include <type_traits>
+#include <utility>
 
 /**
  * Implements a drop-in replacement for std::vector<T> which stores up to N
  * elements directly (without heap allocation). The types Size and Diff are used
  * to store element counts, and can be any unsigned + signed type.
  *
  * Storage layout is either:
  * - Direct allocation:
  *   - Size _size: the number of used elements (between 0 and N)
  *   - T direct[N]: an array of N elements of type T
  *     (only the first _size are initialized).
  * - Indirect allocation:
  *   - Size _size: the number of used elements plus N + 1
  *   - Size capacity: the number of allocated elements
  *   - T* indirect: a pointer to an array of capacity elements of type T
  *     (only the first _size are initialized).
  *
  * The data type T must be movable by memmove/realloc(). Once we switch to C++,
  * move constructors can be used instead.
  */
 template <unsigned int N, typename T, typename Size = uint32_t,
           typename Diff = int32_t>
 class prevector {
 public:
     typedef Size size_type;
     typedef Diff difference_type;
     typedef T value_type;
     typedef value_type &reference;
     typedef const value_type &const_reference;
     typedef value_type *pointer;
     typedef const value_type *const_pointer;
 
     class iterator {
         T *ptr;
 
     public:
         typedef Diff difference_type;
         typedef T value_type;
         typedef T *pointer;
         typedef T &reference;
         typedef std::random_access_iterator_tag iterator_category;
         iterator() : ptr(nullptr) {}
         iterator(T *ptr_) : ptr(ptr_) {}
         T &operator*() const { return *ptr; }
         T *operator->() const { return ptr; }
         T &operator[](size_type pos) { return ptr[pos]; }
         const T &operator[](size_type pos) const { return ptr[pos]; }
         iterator &operator++() {
             ptr++;
             return *this;
         }
         iterator &operator--() {
             ptr--;
             return *this;
         }
         iterator operator++(int) {
             iterator copy(*this);
             ++(*this);
             return copy;
         }
         iterator operator--(int) {
             iterator copy(*this);
             --(*this);
             return copy;
         }
         difference_type friend operator-(iterator a, iterator b) {
             return (&(*a) - &(*b));
         }
         iterator operator+(size_type n) { return iterator(ptr + n); }
         iterator &operator+=(size_type n) {
             ptr += n;
             return *this;
         }
         iterator operator-(size_type n) { return iterator(ptr - n); }
         iterator &operator-=(size_type n) {
             ptr -= n;
             return *this;
         }
         bool operator==(iterator x) const { return ptr == x.ptr; }
         bool operator!=(iterator x) const { return ptr != x.ptr; }
         bool operator>=(iterator x) const { return ptr >= x.ptr; }
         bool operator<=(iterator x) const { return ptr <= x.ptr; }
         bool operator>(iterator x) const { return ptr > x.ptr; }
         bool operator<(iterator x) const { return ptr < x.ptr; }
     };
 
     class reverse_iterator {
         T *ptr;
 
     public:
         typedef Diff difference_type;
         typedef T value_type;
         typedef T *pointer;
         typedef T &reference;
         typedef std::bidirectional_iterator_tag iterator_category;
         reverse_iterator() : ptr(nullptr) {}
         reverse_iterator(T *ptr_) : ptr(ptr_) {}
         T &operator*() { return *ptr; }
         const T &operator*() const { return *ptr; }
         T *operator->() { return ptr; }
         const T *operator->() const { return ptr; }
         reverse_iterator &operator--() {
             ptr++;
             return *this;
         }
         reverse_iterator &operator++() {
             ptr--;
             return *this;
         }
         reverse_iterator operator++(int) {
             reverse_iterator copy(*this);
             ++(*this);
             return copy;
         }
         reverse_iterator operator--(int) {
             reverse_iterator copy(*this);
             --(*this);
             return copy;
         }
         bool operator==(reverse_iterator x) const { return ptr == x.ptr; }
         bool operator!=(reverse_iterator x) const { return ptr != x.ptr; }
     };
 
     class const_iterator {
         const T *ptr;
 
     public:
         typedef Diff difference_type;
         typedef const T value_type;
         typedef const T *pointer;
         typedef const T &reference;
         typedef std::random_access_iterator_tag iterator_category;
         const_iterator() : ptr(nullptr) {}
         const_iterator(const T *ptr_) : ptr(ptr_) {}
         const_iterator(iterator x) : ptr(&(*x)) {}
         const T &operator*() const { return *ptr; }
         const T *operator->() const { return ptr; }
         const T &operator[](size_type pos) const { return ptr[pos]; }
         const_iterator &operator++() {
             ptr++;
             return *this;
         }
         const_iterator &operator--() {
             ptr--;
             return *this;
         }
         const_iterator operator++(int) {
             const_iterator copy(*this);
             ++(*this);
             return copy;
         }
         const_iterator operator--(int) {
             const_iterator copy(*this);
             --(*this);
             return copy;
         }
         difference_type friend operator-(const_iterator a, const_iterator b) {
             return (&(*a) - &(*b));
         }
         const_iterator operator+(size_type n) {
             return const_iterator(ptr + n);
         }
         const_iterator &operator+=(size_type n) {
             ptr += n;
             return *this;
         }
         const_iterator operator-(size_type n) {
             return const_iterator(ptr - n);
         }
         const_iterator &operator-=(size_type n) {
             ptr -= n;
             return *this;
         }
         bool operator==(const_iterator x) const { return ptr == x.ptr; }
         bool operator!=(const_iterator x) const { return ptr != x.ptr; }
         bool operator>=(const_iterator x) const { return ptr >= x.ptr; }
         bool operator<=(const_iterator x) const { return ptr <= x.ptr; }
         bool operator>(const_iterator x) const { return ptr > x.ptr; }
         bool operator<(const_iterator x) const { return ptr < x.ptr; }
     };
 
     class const_reverse_iterator {
         const T *ptr;
 
     public:
         typedef Diff difference_type;
         typedef const T value_type;
         typedef const T *pointer;
         typedef const T &reference;
         typedef std::bidirectional_iterator_tag iterator_category;
         const_reverse_iterator() : ptr(nullptr) {}
         const_reverse_iterator(const T *ptr_) : ptr(ptr_) {}
         const_reverse_iterator(reverse_iterator x) : ptr(&(*x)) {}
         const T &operator*() const { return *ptr; }
         const T *operator->() const { return ptr; }
         const_reverse_iterator &operator--() {
             ptr++;
             return *this;
         }
         const_reverse_iterator &operator++() {
             ptr--;
             return *this;
         }
         const_reverse_iterator operator++(int) {
             const_reverse_iterator copy(*this);
             ++(*this);
             return copy;
         }
         const_reverse_iterator operator--(int) {
             const_reverse_iterator copy(*this);
             --(*this);
             return copy;
         }
         bool operator==(const_reverse_iterator x) const { return ptr == x.ptr; }
         bool operator!=(const_reverse_iterator x) const { return ptr != x.ptr; }
     };
 
 private:
 #pragma pack(push, 1)
     union direct_or_indirect {
         char direct[sizeof(T) * N];
         struct {
             char *indirect;
             size_type capacity;
         };
     };
 #pragma pack(pop)
     alignas(char *) direct_or_indirect _union = {};
     size_type _size = 0;
 
     static_assert(alignof(char *) % alignof(size_type) == 0 &&
                       sizeof(char *) % alignof(size_type) == 0,
                   "size_type cannot have more restrictive alignment "
                   "requirement than pointer");
     static_assert(alignof(char *) % alignof(T) == 0,
                   "value_type T cannot have more restrictive alignment "
                   "requirement than pointer");
 
     T *direct_ptr(difference_type pos) {
         return reinterpret_cast<T *>(_union.direct) + pos;
     }
     const T *direct_ptr(difference_type pos) const {
         return reinterpret_cast<const T *>(_union.direct) + pos;
     }
     T *indirect_ptr(difference_type pos) {
         return reinterpret_cast<T *>(_union.indirect) + pos;
     }
     const T *indirect_ptr(difference_type pos) const {
         return reinterpret_cast<const T *>(_union.indirect) + pos;
     }
     bool is_direct() const { return _size <= N; }
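 
     // Worked example of the _size encoding: with N = 28, a direct prevector
     // holding 10 elements stores _size = 10 (<= N, so is_direct() is true);
     // after growing into indirect storage with 40 elements it stores
     // _size = 40 + N + 1 = 69, and size() recovers 69 - N - 1 = 40.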
 
     void change_capacity(size_type new_capacity) {
         if (new_capacity <= N) {
             if (!is_direct()) {
                 T *indirect = indirect_ptr(0);
                 T *src = indirect;
                 T *dst = direct_ptr(0);
                 memcpy(dst, src, size() * sizeof(T));
                 free(indirect);
                 _size -= N + 1;
             }
         } else {
             if (!is_direct()) {
                 // FIXME: Because malloc/realloc here won't call new_handler if
                 // allocation fails, assert success. These should instead use an
                 // allocator or new/delete so that handlers are called as
                 // necessary, but performance would be slightly degraded by
                 // doing so.
                 _union.indirect = static_cast<char *>(realloc(
                     _union.indirect, ((size_t)sizeof(T)) * new_capacity));
                 assert(_union.indirect);
                 _union.capacity = new_capacity;
             } else {
                 char *new_indirect = static_cast<char *>(
                     malloc(((size_t)sizeof(T)) * new_capacity));
                 assert(new_indirect);
                 T *src = direct_ptr(0);
                 T *dst = reinterpret_cast<T *>(new_indirect);
                 memcpy(dst, src, size() * sizeof(T));
                 _union.indirect = new_indirect;
                 _union.capacity = new_capacity;
                 _size += N + 1;
             }
         }
     }
 
     T *item_ptr(difference_type pos) {
         return is_direct() ? direct_ptr(pos) : indirect_ptr(pos);
     }
     const T *item_ptr(difference_type pos) const {
         return is_direct() ? direct_ptr(pos) : indirect_ptr(pos);
     }
 
     void fill(T *dst, ptrdiff_t count, const T &value = T{}) {
         std::fill_n(dst, count, value);
     }
 
     template <typename InputIterator>
     void fill(T *dst, InputIterator first, InputIterator last) {
         while (first != last) {
             new (static_cast<void *>(dst)) T(*first);
             ++dst;
             ++first;
         }
     }
 
 public:
     void assign(size_type n, const T &val) {
         clear();
         if (capacity() < n) {
             change_capacity(n);
         }
         _size += n;
         fill(item_ptr(0), n, val);
     }
 
     template <typename InputIterator>
     void assign(InputIterator first, InputIterator last) {
         size_type n = last - first;
         clear();
         if (capacity() < n) {
             change_capacity(n);
         }
         _size += n;
         fill(item_ptr(0), first, last);
     }
 
     prevector() {}
 
     explicit prevector(size_type n) { resize(n); }
 
     explicit prevector(size_type n, const T &val) {
         change_capacity(n);
         _size += n;
         fill(item_ptr(0), n, val);
     }
 
     template <typename InputIterator>
     prevector(InputIterator first, InputIterator last) {
         size_type n = last - first;
         change_capacity(n);
         _size += n;
         fill(item_ptr(0), first, last);
     }
 
     prevector(const prevector<N, T, Size, Diff> &other) {
         size_type n = other.size();
         change_capacity(n);
         _size += n;
         fill(item_ptr(0), other.begin(), other.end());
     }
 
     prevector(prevector<N, T, Size, Diff> &&other) { swap(other); }
 
     prevector &operator=(const prevector<N, T, Size, Diff> &other) {
         if (&other == this) {
             return *this;
         }
         assign(other.begin(), other.end());
         return *this;
     }
 
     prevector &operator=(prevector<N, T, Size, Diff> &&other) {
         swap(other);
         return *this;
     }
 
     size_type size() const { return is_direct() ? _size : _size - N - 1; }
 
     bool empty() const { return size() == 0; }
 
     iterator begin() { return iterator(item_ptr(0)); }
     const_iterator begin() const { return const_iterator(item_ptr(0)); }
     iterator end() { return iterator(item_ptr(size())); }
     const_iterator end() const { return const_iterator(item_ptr(size())); }
 
     reverse_iterator rbegin() { return reverse_iterator(item_ptr(size() - 1)); }
     const_reverse_iterator rbegin() const {
         return const_reverse_iterator(item_ptr(size() - 1));
     }
     reverse_iterator rend() { return reverse_iterator(item_ptr(-1)); }
     const_reverse_iterator rend() const {
         return const_reverse_iterator(item_ptr(-1));
     }
 
     size_t capacity() const {
         if (is_direct()) {
             return N;
         } else {
             return _union.capacity;
         }
     }
 
     T &operator[](size_type pos) { return *item_ptr(pos); }
 
     const T &operator[](size_type pos) const { return *item_ptr(pos); }
 
     void resize(size_type new_size) {
         size_type cur_size = size();
         if (cur_size == new_size) {
             return;
         }
         if (cur_size > new_size) {
             erase(item_ptr(new_size), end());
             return;
         }
         if (new_size > capacity()) {
             change_capacity(new_size);
         }
         ptrdiff_t increase = new_size - cur_size;
         fill(item_ptr(cur_size), increase);
         _size += increase;
     }
 
     void reserve(size_type new_capacity) {
         if (new_capacity > capacity()) {
             change_capacity(new_capacity);
         }
     }
 
     void shrink_to_fit() { change_capacity(size()); }
 
     void clear() { resize(0); }
 
     iterator insert(iterator pos, const T &value) {
         size_type p = pos - begin();
         size_type new_size = size() + 1;
         if (capacity() < new_size) {
             change_capacity(new_size + (new_size >> 1));
         }
         T *ptr = item_ptr(p);
         memmove(ptr + 1, ptr, (size() - p) * sizeof(T));
         _size++;
         new (static_cast<void *>(ptr)) T(value);
         return iterator(ptr);
     }
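 
     // Worked example of the growth policy above: inserting into a full
     // prevector with size() == 10 computes new_size = 11 and requests
     // change_capacity(new_size + (new_size >> 1)) == 16, i.e. roughly 1.5x
     // growth, amortizing the cost of repeated reallocations.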
 
     void insert(iterator pos, size_type count, const T &value) {
         size_type p = pos - begin();
         size_type new_size = size() + count;
         if (capacity() < new_size) {
             change_capacity(new_size + (new_size >> 1));
         }
         T *ptr = item_ptr(p);
         memmove(ptr + count, ptr, (size() - p) * sizeof(T));
         _size += count;
         fill(item_ptr(p), count, value);
     }
 
     template <typename InputIterator>
     void insert(iterator pos, InputIterator first, InputIterator last) {
         size_type p = pos - begin();
         difference_type count = last - first;
         size_type new_size = size() + count;
         if (capacity() < new_size) {
             change_capacity(new_size + (new_size >> 1));
         }
         T *ptr = item_ptr(p);
         memmove(ptr + count, ptr, (size() - p) * sizeof(T));
         _size += count;
         fill(ptr, first, last);
     }
 
     iterator erase(iterator pos) { return erase(pos, pos + 1); }
 
     iterator erase(iterator first, iterator last) {
         // Erase is not allowed to change the object's capacity. That means
         // that when starting with an indirectly allocated prevector with
         // size and capacity > N, the result may be a still indirectly allocated
         // prevector with size <= N and capacity > N. A shrink_to_fit() call is
         // necessary to switch to the (more efficient) directly allocated
         // representation (with capacity N and size <= N).
         iterator p = first;
         char *endp = (char *)&(*end());
         if (!std::is_trivially_destructible<T>::value) {
             while (p != last) {
                 (*p).~T();
                 _size--;
                 ++p;
             }
         } else {
             _size -= last - p;
         }
         memmove(&(*first), &(*last), endp - ((char *)(&(*last))));
         return first;
     }
 
     void push_back(const T &value) {
         size_type new_size = size() + 1;
         if (capacity() < new_size) {
             change_capacity(new_size + (new_size >> 1));
         }
         new (item_ptr(size())) T(value);
         _size++;
     }
 
     void pop_back() { erase(end() - 1, end()); }
 
     T &front() { return *item_ptr(0); }
 
     const T &front() const { return *item_ptr(0); }
 
     T &back() { return *item_ptr(size() - 1); }
 
     const T &back() const { return *item_ptr(size() - 1); }
 
     void swap(prevector<N, T, Size, Diff> &other) {
         std::swap(_union, other._union);
         std::swap(_size, other._size);
     }
 
     ~prevector() {
         if (!std::is_trivially_destructible<T>::value) {
             clear();
         }
         if (!is_direct()) {
             free(_union.indirect);
             _union.indirect = nullptr;
         }
     }
 
     bool operator==(const prevector<N, T, Size, Diff> &other) const {
         if (other.size() != size()) {
             return false;
         }
         const_iterator b1 = begin();
         const_iterator b2 = other.begin();
         const_iterator e1 = end();
         while (b1 != e1) {
             if ((*b1) != (*b2)) {
                 return false;
             }
             ++b1;
             ++b2;
         }
         return true;
     }
 
     bool operator!=(const prevector<N, T, Size, Diff> &other) const {
         return !(*this == other);
     }
 
     bool operator<(const prevector<N, T, Size, Diff> &other) const {
         if (size() < other.size()) {
             return true;
         }
         if (size() > other.size()) {
             return false;
         }
         const_iterator b1 = begin();
         const_iterator b2 = other.begin();
         const_iterator e1 = end();
         while (b1 != e1) {
             if ((*b1) < (*b2)) {
                 return true;
             }
             if ((*b2) < (*b1)) {
                 return false;
             }
             ++b1;
             ++b2;
         }
         return false;
     }
 
     size_t allocated_memory() const {
         if (is_direct()) {
             return 0;
         } else {
             return ((size_t)(sizeof(T))) * _union.capacity;
         }
     }
 
     value_type *data() { return item_ptr(0); }
 
     const value_type *data() const { return item_ptr(0); }
 };
 
 #endif // BITCOIN_PREVECTOR_H
diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp
index b1c9ec7a2..21a198670 100644
--- a/src/qt/bantablemodel.cpp
+++ b/src/qt/bantablemodel.cpp
@@ -1,167 +1,167 @@
 // Copyright (c) 2011-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <qt/bantablemodel.h>
 
 #include <interfaces/node.h>
 #include <net_types.h> // For banmap_t
 #include <qt/clientmodel.h>
 
-#include <algorithm>
+#include <utility>
 
 #include <QDebug>
 #include <QList>
 
 bool BannedNodeLessThan::operator()(const CCombinedBan &left,
                                     const CCombinedBan &right) const {
     const CCombinedBan *pLeft = &left;
     const CCombinedBan *pRight = &right;
 
     if (order == Qt::DescendingOrder) {
         std::swap(pLeft, pRight);
     }
 
     switch (column) {
         case BanTableModel::Address:
             return pLeft->subnet.ToString().compare(pRight->subnet.ToString()) <
                    0;
         case BanTableModel::Bantime:
             return pLeft->banEntry.nBanUntil < pRight->banEntry.nBanUntil;
     }
 
     return false;
 }
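 
 // For example, when sorting the Bantime column in descending order, pLeft and
 // pRight are swapped above, so the < comparison on nBanUntil effectively asks
 // whether right is less than left, inverting the order without duplicating
 // the per-column logic. NodeLessThan in peertablemodel.cpp uses the same
 // trick.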
 
 // private implementation
 class BanTablePriv {
 public:
     /** Local cache of peer information */
     QList<CCombinedBan> cachedBanlist;
     /** Column to sort nodes by (default to unsorted) */
     int sortColumn{-1};
     /** Order (ascending or descending) to sort nodes by */
     Qt::SortOrder sortOrder;
 
     /** Pull a full list of banned nodes from CNode into our cache */
     void refreshBanlist(interfaces::Node &node) {
         banmap_t banMap;
         node.getBanned(banMap);
 
         cachedBanlist.clear();
         cachedBanlist.reserve(banMap.size());
         for (const auto &entry : banMap) {
             CCombinedBan banEntry;
             banEntry.subnet = entry.first;
             banEntry.banEntry = entry.second;
             cachedBanlist.append(banEntry);
         }
 
         if (sortColumn >= 0) {
             // sort cachedBanlist (use stable sort to prevent rows jumping
             // around unnecessarily)
             std::stable_sort(cachedBanlist.begin(), cachedBanlist.end(),
                              BannedNodeLessThan(sortColumn, sortOrder));
         }
     }
 
     int size() const { return cachedBanlist.size(); }
 
     CCombinedBan *index(int idx) {
         if (idx >= 0 && idx < cachedBanlist.size()) {
             return &cachedBanlist[idx];
         }
 
         return nullptr;
     }
 };
 
 BanTableModel::BanTableModel(interfaces::Node &node, ClientModel *parent)
     : QAbstractTableModel(parent), m_node(node), clientModel(parent) {
     columns << tr("IP/Netmask") << tr("Banned Until");
     priv.reset(new BanTablePriv());
 
     // load initial data
     refresh();
 }
 
 BanTableModel::~BanTableModel() {
     // Intentionally left empty
 }
 
 int BanTableModel::rowCount(const QModelIndex &parent) const {
     Q_UNUSED(parent);
     return priv->size();
 }
 
 int BanTableModel::columnCount(const QModelIndex &parent) const {
     Q_UNUSED(parent);
     return columns.length();
 }
 
 QVariant BanTableModel::data(const QModelIndex &index, int role) const {
     if (!index.isValid()) {
         return QVariant();
     }
 
     CCombinedBan *rec = static_cast<CCombinedBan *>(index.internalPointer());
 
     if (role == Qt::DisplayRole) {
         switch (index.column()) {
             case Address:
                 return QString::fromStdString(rec->subnet.ToString());
             case Bantime:
                 QDateTime date = QDateTime::fromMSecsSinceEpoch(0);
                 date = date.addSecs(rec->banEntry.nBanUntil);
                 return date.toString(Qt::SystemLocaleLongDate);
         }
     }
 
     return QVariant();
 }
 
 QVariant BanTableModel::headerData(int section, Qt::Orientation orientation,
                                    int role) const {
     if (orientation == Qt::Horizontal) {
         if (role == Qt::DisplayRole && section < columns.size()) {
             return columns[section];
         }
     }
     return QVariant();
 }
 
 Qt::ItemFlags BanTableModel::flags(const QModelIndex &index) const {
     if (!index.isValid()) {
         return Qt::NoItemFlags;
     }
 
     Qt::ItemFlags retval = Qt::ItemIsSelectable | Qt::ItemIsEnabled;
     return retval;
 }
 
 QModelIndex BanTableModel::index(int row, int column,
                                  const QModelIndex &parent) const {
     Q_UNUSED(parent);
     CCombinedBan *data = priv->index(row);
 
     if (data) {
         return createIndex(row, column, data);
     }
     return QModelIndex();
 }
 
 void BanTableModel::refresh() {
     Q_EMIT layoutAboutToBeChanged();
     priv->refreshBanlist(m_node);
     Q_EMIT layoutChanged();
 }
 
 void BanTableModel::sort(int column, Qt::SortOrder order) {
     priv->sortColumn = column;
     priv->sortOrder = order;
     refresh();
 }
 
 bool BanTableModel::shouldShow() {
     return priv->size() > 0;
 }
diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp
index 8ad013675..ed7f3e1e6 100644
--- a/src/qt/peertablemodel.cpp
+++ b/src/qt/peertablemodel.cpp
@@ -1,230 +1,230 @@
 // Copyright (c) 2011-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <qt/peertablemodel.h>
 
 #include <qt/clientmodel.h>
 #include <qt/guiconstants.h>
 #include <qt/guiutil.h>
 
 #include <interfaces/node.h>
 
-#include <algorithm>
+#include <utility>
 
 #include <QDebug>
 #include <QList>
 #include <QTimer>
 
 bool NodeLessThan::operator()(const CNodeCombinedStats &left,
                               const CNodeCombinedStats &right) const {
     const CNodeStats *pLeft = &(left.nodeStats);
     const CNodeStats *pRight = &(right.nodeStats);
 
     if (order == Qt::DescendingOrder) {
         std::swap(pLeft, pRight);
     }
 
     switch (column) {
         case PeerTableModel::NetNodeId:
             return pLeft->nodeid < pRight->nodeid;
         case PeerTableModel::Address:
             return pLeft->addrName.compare(pRight->addrName) < 0;
         case PeerTableModel::Subversion:
             return pLeft->cleanSubVer.compare(pRight->cleanSubVer) < 0;
         case PeerTableModel::Ping:
             return pLeft->m_min_ping_usec < pRight->m_min_ping_usec;
         case PeerTableModel::Sent:
             return pLeft->nSendBytes < pRight->nSendBytes;
         case PeerTableModel::Received:
             return pLeft->nRecvBytes < pRight->nRecvBytes;
     }
 
     return false;
 }
 
 // private implementation
 class PeerTablePriv {
 public:
     /** Local cache of peer information */
     QList<CNodeCombinedStats> cachedNodeStats;
     /** Column to sort nodes by (default to unsorted) */
     int sortColumn{-1};
     /** Order (ascending or descending) to sort nodes by */
     Qt::SortOrder sortOrder;
     /** Index of rows by node ID */
     std::map<NodeId, int> mapNodeRows;
 
     /** Pull a full list of peers from vNodes into our cache */
     void refreshPeers(interfaces::Node &node) {
         {
             cachedNodeStats.clear();
 
             interfaces::Node::NodesStats nodes_stats;
             node.getNodesStats(nodes_stats);
             cachedNodeStats.reserve(nodes_stats.size());
             for (const auto &node_stats : nodes_stats) {
                 CNodeCombinedStats stats;
                 stats.nodeStats = std::get<0>(node_stats);
                 stats.fNodeStateStatsAvailable = std::get<1>(node_stats);
                 stats.nodeStateStats = std::get<2>(node_stats);
                 cachedNodeStats.append(stats);
             }
         }
 
         if (sortColumn >= 0) {
            // sort cachedNodeStats (use stable sort to prevent rows jumping
             // around unnecessarily)
             std::stable_sort(cachedNodeStats.begin(), cachedNodeStats.end(),
                              NodeLessThan(sortColumn, sortOrder));
         }
 
         // build index map
         mapNodeRows.clear();
         int row = 0;
         for (const CNodeCombinedStats &stats : cachedNodeStats) {
             mapNodeRows.insert(
                 std::pair<NodeId, int>(stats.nodeStats.nodeid, row++));
         }
     }
 
     int size() const { return cachedNodeStats.size(); }
 
     CNodeCombinedStats *index(int idx) {
         if (idx >= 0 && idx < cachedNodeStats.size()) {
             return &cachedNodeStats[idx];
         }
 
         return nullptr;
     }
 };
 
 PeerTableModel::PeerTableModel(interfaces::Node &node, ClientModel *parent)
     : QAbstractTableModel(parent), m_node(node), clientModel(parent),
       timer(nullptr) {
     columns << tr("NodeId") << tr("Node/Service") << tr("Ping") << tr("Sent")
             << tr("Received") << tr("User Agent");
     priv.reset(new PeerTablePriv());
 
     // set up timer for auto refresh
     timer = new QTimer(this);
     connect(timer, &QTimer::timeout, this, &PeerTableModel::refresh);
     timer->setInterval(MODEL_UPDATE_DELAY);
 
     // load initial data
     refresh();
 }
 
 PeerTableModel::~PeerTableModel() {
     // Intentionally left empty
 }
 
 void PeerTableModel::startAutoRefresh() {
     timer->start();
 }
 
 void PeerTableModel::stopAutoRefresh() {
     timer->stop();
 }
 
 int PeerTableModel::rowCount(const QModelIndex &parent) const {
     Q_UNUSED(parent);
     return priv->size();
 }
 
 int PeerTableModel::columnCount(const QModelIndex &parent) const {
     Q_UNUSED(parent);
     return columns.length();
 }
 
 QVariant PeerTableModel::data(const QModelIndex &index, int role) const {
     if (!index.isValid()) {
         return QVariant();
     }
 
     CNodeCombinedStats *rec =
         static_cast<CNodeCombinedStats *>(index.internalPointer());
 
     if (role == Qt::DisplayRole) {
         switch (index.column()) {
             case NetNodeId:
                 return (qint64)rec->nodeStats.nodeid;
             case Address:
                 return QString::fromStdString(rec->nodeStats.addrName);
             case Subversion:
                 return QString::fromStdString(rec->nodeStats.cleanSubVer);
             case Ping:
                 return GUIUtil::formatPingTime(rec->nodeStats.m_min_ping_usec);
             case Sent:
                 return GUIUtil::formatBytes(rec->nodeStats.nSendBytes);
             case Received:
                 return GUIUtil::formatBytes(rec->nodeStats.nRecvBytes);
         }
     } else if (role == Qt::TextAlignmentRole) {
         switch (index.column()) {
             case Ping:
             case Sent:
             case Received:
                 return QVariant(Qt::AlignRight | Qt::AlignVCenter);
             default:
                 return QVariant();
         }
     }
 
     return QVariant();
 }
 
 QVariant PeerTableModel::headerData(int section, Qt::Orientation orientation,
                                     int role) const {
     if (orientation == Qt::Horizontal) {
         if (role == Qt::DisplayRole && section < columns.size()) {
             return columns[section];
         }
     }
     return QVariant();
 }
 
 Qt::ItemFlags PeerTableModel::flags(const QModelIndex &index) const {
     if (!index.isValid()) {
         return Qt::NoItemFlags;
     }
 
     Qt::ItemFlags retval = Qt::ItemIsSelectable | Qt::ItemIsEnabled;
     return retval;
 }
 
 QModelIndex PeerTableModel::index(int row, int column,
                                   const QModelIndex &parent) const {
     Q_UNUSED(parent);
     CNodeCombinedStats *data = priv->index(row);
 
     if (data) {
         return createIndex(row, column, data);
     }
     return QModelIndex();
 }
 
 const CNodeCombinedStats *PeerTableModel::getNodeStats(int idx) {
     return priv->index(idx);
 }
 
 void PeerTableModel::refresh() {
     Q_EMIT layoutAboutToBeChanged();
     priv->refreshPeers(m_node);
     Q_EMIT layoutChanged();
 }
 
 int PeerTableModel::getRowByNodeId(NodeId nodeid) {
     std::map<NodeId, int>::iterator it = priv->mapNodeRows.find(nodeid);
     if (it == priv->mapNodeRows.end()) {
         return -1;
     }
 
     return it->second;
 }
 
 void PeerTableModel::sort(int column, Qt::SortOrder order) {
     priv->sortColumn = column;
     priv->sortOrder = order;
     refresh();
 }
diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp
index e439e5743..0db0b5fa1 100644
--- a/src/test/checkqueue_tests.cpp
+++ b/src/test/checkqueue_tests.cpp
@@ -1,421 +1,422 @@
 // Copyright (c) 2012-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <checkqueue.h>
 
 #include <sync.h>
 #include <util/system.h>
 #include <util/time.h>
 
 #include <atomic>
 #include <condition_variable>
 #include <mutex>
 #include <test/util/setup_common.h>
 #include <thread>
 #include <vector>
 
 #include <boost/test/unit_test.hpp>
 
 #include <memory>
 #include <unordered_set>
+#include <utility>
 
 BOOST_FIXTURE_TEST_SUITE(checkqueue_tests, TestingSetup)
 
 static const unsigned int QUEUE_BATCH_SIZE = 128;
 static const int SCRIPT_CHECK_THREADS = 3;
 
 struct FakeCheck {
     bool operator()() { return true; }
     void swap(FakeCheck &x){};
 };
 
 struct FakeCheckCheckCompletion {
     static std::atomic<size_t> n_calls;
     bool operator()() {
         n_calls.fetch_add(1, std::memory_order_relaxed);
         return true;
     }
     void swap(FakeCheckCheckCompletion &x){};
 };
 
 struct FailingCheck {
     bool fails;
     FailingCheck(bool _fails) : fails(_fails){};
     FailingCheck() : fails(true){};
     bool operator()() { return !fails; }
     void swap(FailingCheck &x) { std::swap(fails, x.fails); };
 };
 
 struct UniqueCheck {
     static Mutex m;
     static std::unordered_multiset<size_t> results GUARDED_BY(m);
     size_t check_id;
     UniqueCheck(size_t check_id_in) : check_id(check_id_in){};
     UniqueCheck() : check_id(0){};
     bool operator()() {
         LOCK(m);
         results.insert(check_id);
         return true;
     }
     void swap(UniqueCheck &x) { std::swap(x.check_id, check_id); };
 };
 
 struct MemoryCheck {
     static std::atomic<size_t> fake_allocated_memory;
     bool b{false};
     bool operator()() { return true; }
     MemoryCheck(){};
     MemoryCheck(const MemoryCheck &x) {
         // We have to do this to make sure that destructor calls are paired
         //
         // Really, copy constructor should be deletable, but CCheckQueue breaks
         // if it is deleted because of internal push_back.
         fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
     };
     MemoryCheck(bool b_) : b(b_) {
         fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
     };
     ~MemoryCheck() {
         fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed);
     };
     void swap(MemoryCheck &x) { std::swap(b, x.b); };
 };
 
 struct FrozenCleanupCheck {
     static std::atomic<uint64_t> nFrozen;
     static std::condition_variable cv;
     static std::mutex m;
     // Freezing can't be the default initialized behavior given how the queue
     // swaps in default initialized Checks.
     bool should_freeze{false};
     bool operator()() { return true; }
     FrozenCleanupCheck() {}
     ~FrozenCleanupCheck() {
         if (should_freeze) {
             std::unique_lock<std::mutex> l(m);
             nFrozen.store(1, std::memory_order_relaxed);
             cv.notify_one();
             cv.wait(
                 l, [] { return nFrozen.load(std::memory_order_relaxed) == 0; });
         }
     }
     void swap(FrozenCleanupCheck &x) {
         std::swap(should_freeze, x.should_freeze);
     };
 };
 
 // Static Allocations
 std::mutex FrozenCleanupCheck::m{};
 std::atomic<uint64_t> FrozenCleanupCheck::nFrozen{0};
 std::condition_variable FrozenCleanupCheck::cv{};
 Mutex UniqueCheck::m;
 std::unordered_multiset<size_t> UniqueCheck::results;
 std::atomic<size_t> FakeCheckCheckCompletion::n_calls{0};
 std::atomic<size_t> MemoryCheck::fake_allocated_memory{0};
 
 // Queue Typedefs
 typedef CCheckQueue<FakeCheckCheckCompletion> Correct_Queue;
 typedef CCheckQueue<FakeCheck> Standard_Queue;
 typedef CCheckQueue<FailingCheck> Failing_Queue;
 typedef CCheckQueue<UniqueCheck> Unique_Queue;
 typedef CCheckQueue<MemoryCheck> Memory_Queue;
 typedef CCheckQueue<FrozenCleanupCheck> FrozenCleanup_Queue;
 
 /** This test case checks that the CCheckQueue works properly
  * with each specified number of checks pushed.
  */
 static void Correct_Queue_range(std::vector<size_t> range) {
     auto small_queue = std::make_unique<Correct_Queue>(QUEUE_BATCH_SIZE);
     boost::thread_group tg;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { small_queue->Thread(); });
     }
     // Make vChecks here to save on malloc (this test can be slow...)
     std::vector<FakeCheckCheckCompletion> vChecks;
     for (const size_t i : range) {
         size_t total = i;
         FakeCheckCheckCompletion::n_calls = 0;
         CCheckQueueControl<FakeCheckCheckCompletion> control(small_queue.get());
         while (total) {
             vChecks.resize(std::min(total, (size_t)InsecureRandRange(10)));
             total -= vChecks.size();
             control.Add(vChecks);
         }
         BOOST_REQUIRE(control.Wait());
         if (FakeCheckCheckCompletion::n_calls != i) {
             BOOST_REQUIRE_EQUAL(FakeCheckCheckCompletion::n_calls, i);
         }
     }
     tg.interrupt_all();
     tg.join_all();
 }
 
 /** Test that 0 checks is correct
  */
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Zero) {
     std::vector<size_t> range;
     range.push_back((size_t)0);
     Correct_Queue_range(range);
 }
 /** Test that 1 check is correct
  */
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_One) {
     std::vector<size_t> range;
     range.push_back((size_t)1);
     Correct_Queue_range(range);
 }
 /** Test that MAX check is correct
  */
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Max) {
     std::vector<size_t> range;
     range.push_back(100000);
     Correct_Queue_range(range);
 }
 /** Test that random numbers of checks are correct
  */
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Correct_Random) {
     std::vector<size_t> range;
     range.reserve(100000 / 1000);
     for (size_t i = 2; i < 100000;
          i += std::max((size_t)1, (size_t)InsecureRandRange(std::min(
                                       (size_t)1000, ((size_t)100000) - i)))) {
         range.push_back(i);
     }
     Correct_Queue_range(range);
 }
 
 /** Test that failing checks are caught */
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Catches_Failure) {
     auto fail_queue = std::make_unique<Failing_Queue>(QUEUE_BATCH_SIZE);
 
     boost::thread_group tg;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { fail_queue->Thread(); });
     }
 
     for (size_t i = 0; i < 1001; ++i) {
         CCheckQueueControl<FailingCheck> control(fail_queue.get());
         size_t remaining = i;
         while (remaining) {
             size_t r = InsecureRandRange(10);
 
             std::vector<FailingCheck> vChecks;
             vChecks.reserve(r);
             for (size_t k = 0; k < r && remaining; k++, remaining--) {
                 vChecks.emplace_back(remaining == 1);
             }
             control.Add(vChecks);
         }
         bool success = control.Wait();
         if (i > 0) {
             BOOST_REQUIRE(!success);
         } else if (i == 0) {
             BOOST_REQUIRE(success);
         }
     }
     tg.interrupt_all();
     tg.join_all();
 }
 // Test that a block validation which fails does not interfere with
 // future blocks, i.e., the bad state is cleared.
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Recovers_From_Failure) {
     auto fail_queue = std::make_unique<Failing_Queue>(QUEUE_BATCH_SIZE);
     boost::thread_group tg;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { fail_queue->Thread(); });
     }
 
     for (auto times = 0; times < 10; ++times) {
         for (const bool end_fails : {true, false}) {
             CCheckQueueControl<FailingCheck> control(fail_queue.get());
             {
                 std::vector<FailingCheck> vChecks;
                 vChecks.resize(100, false);
                 vChecks[99] = end_fails;
                 control.Add(vChecks);
             }
             bool r = control.Wait();
             BOOST_REQUIRE(r != end_fails);
         }
     }
     tg.interrupt_all();
     tg.join_all();
 }
 
 // Test that unique checks are actually all called individually, rather than
 // just one check being called repeatedly. Test that checks are not called
 // more than once as well
 BOOST_AUTO_TEST_CASE(test_CheckQueue_UniqueCheck) {
     auto queue = std::make_unique<Unique_Queue>(QUEUE_BATCH_SIZE);
     boost::thread_group tg;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { queue->Thread(); });
     }
 
     size_t COUNT = 100000;
     size_t total = COUNT;
     {
         CCheckQueueControl<UniqueCheck> control(queue.get());
         while (total) {
             size_t r = InsecureRandRange(10);
             std::vector<UniqueCheck> vChecks;
             for (size_t k = 0; k < r && total; k++) {
                 vChecks.emplace_back(--total);
             }
             control.Add(vChecks);
         }
     }
     {
         LOCK(UniqueCheck::m);
         bool r = true;
         BOOST_REQUIRE_EQUAL(UniqueCheck::results.size(), COUNT);
         for (size_t i = 0; i < COUNT; ++i) {
             r = r && UniqueCheck::results.count(i) == 1;
         }
         BOOST_REQUIRE(r);
     }
     tg.interrupt_all();
     tg.join_all();
 }
 
 // Test that blocks which might allocate lots of memory free their memory
 // aggressively.
 //
 // This test attempts to catch a pathological case whereby lazily freeing
 // checks might leave a check un-swapped out, and decreasing by 1 each time
 // could leave the data hanging across a sequence of blocks.
 BOOST_AUTO_TEST_CASE(test_CheckQueue_Memory) {
     auto queue = std::make_unique<Memory_Queue>(QUEUE_BATCH_SIZE);
     boost::thread_group tg;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { queue->Thread(); });
     }
     for (size_t i = 0; i < 1000; ++i) {
         size_t total = i;
         {
             CCheckQueueControl<MemoryCheck> control(queue.get());
             while (total) {
                 size_t r = InsecureRandRange(10);
                 std::vector<MemoryCheck> vChecks;
                 for (size_t k = 0; k < r && total; k++) {
                     total--;
                     // Each iteration leaves data at the front, back, and middle
                     // to catch any sort of deallocation failure
                     vChecks.emplace_back(total == 0 || total == i ||
                                          total == i / 2);
                 }
                 control.Add(vChecks);
             }
         }
         BOOST_REQUIRE_EQUAL(MemoryCheck::fake_allocated_memory, 0U);
     }
     tg.interrupt_all();
     tg.join_all();
 }
 
 // Test that a new verification cannot occur until all checks
 // have been destructed
 BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup) {
     auto queue = std::make_unique<FrozenCleanup_Queue>(QUEUE_BATCH_SIZE);
     boost::thread_group tg;
     bool fails = false;
     for (auto x = 0; x < SCRIPT_CHECK_THREADS; ++x) {
         tg.create_thread([&] { queue->Thread(); });
     }
     std::thread t0([&]() {
         CCheckQueueControl<FrozenCleanupCheck> control(queue.get());
         std::vector<FrozenCleanupCheck> vChecks(1);
         // Freezing can't be the default initialized behavior given how the
         // queue
         // swaps in default initialized Checks (otherwise freezing destructor
         // would get called twice).
         vChecks[0].should_freeze = true;
         control.Add(vChecks);
         // Hangs here
         bool waitResult = control.Wait();
         assert(waitResult);
     });
     {
         std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
         // Wait until the queue has finished all jobs and frozen
         FrozenCleanupCheck::cv.wait(
             l, []() { return FrozenCleanupCheck::nFrozen == 1; });
     }
     // Try to get control of the queue a bunch of times
     for (auto x = 0; x < 100 && !fails; ++x) {
         fails = queue->ControlMutex.try_lock();
     }
     {
         // Unfreeze (we need the lock in case of spurious wakeup)
         std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
         FrozenCleanupCheck::nFrozen = 0;
     }
     // Awaken frozen destructor
     FrozenCleanupCheck::cv.notify_one();
     // Wait for control to finish
     t0.join();
     tg.interrupt_all();
     tg.join_all();
     BOOST_REQUIRE(!fails);
 }
 
 /** Test that CCheckQueueControl is threadsafe */
 BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) {
     auto queue = std::make_unique<Standard_Queue>(QUEUE_BATCH_SIZE);
     {
         boost::thread_group tg;
         std::atomic<int> nThreads{0};
         std::atomic<int> fails{0};
         for (size_t i = 0; i < 3; ++i) {
             tg.create_thread([&] {
                 CCheckQueueControl<FakeCheck> control(queue.get());
                 // While sleeping, no other thread should execute to this point
                 auto observed = ++nThreads;
                 UninterruptibleSleep(std::chrono::milliseconds{10});
                 fails += observed != nThreads;
             });
         }
         tg.join_all();
         BOOST_REQUIRE_EQUAL(fails, 0);
     }
     {
         boost::thread_group tg;
         std::mutex m;
         std::condition_variable cv;
         bool has_lock{false};
         bool has_tried{false};
         bool done{false};
         bool done_ack{false};
         {
             std::unique_lock<std::mutex> l(m);
             tg.create_thread([&] {
                 CCheckQueueControl<FakeCheck> control(queue.get());
                 std::unique_lock<std::mutex> ll(m);
                 has_lock = true;
                 cv.notify_one();
                 cv.wait(ll, [&] { return has_tried; });
                 done = true;
                 cv.notify_one();
                 // Wait until the done is acknowledged
                 //
                 cv.wait(ll, [&] { return done_ack; });
             });
             // Wait for thread to get the lock
             cv.wait(l, [&]() { return has_lock; });
             bool fails = false;
             for (auto x = 0; x < 100 && !fails; ++x) {
                 fails = queue->ControlMutex.try_lock();
             }
             has_tried = true;
             cv.notify_one();
             cv.wait(l, [&]() { return done; });
             // Acknowledge the done
             done_ack = true;
             cv.notify_one();
             BOOST_REQUIRE(!fails);
         }
         tg.join_all();
     }
 }
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/validation.h b/src/validation.h
index 6be8f5a62..1b75dc3dd 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,1120 +1,1119 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2019 The Bitcoin Core developers
 // Copyright (c) 2017-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_VALIDATION_H
 #define BITCOIN_VALIDATION_H
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <amount.h>
 #include <blockfileinfo.h>
 #include <blockindexworkcomparator.h>
 #include <coins.h>
 #include <consensus/consensus.h>
 #include <disconnectresult.h>
 #include <flatfile.h>
 #include <fs.h>
 #include <protocol.h> // For CMessageHeader::MessageMagic
 #include <script/script_error.h>
 #include <script/script_metrics.h>
 #include <sync.h>
 #include <txdb.h>
 #include <txmempool.h> // For CTxMemPool::cs
 #include <versionbits.h>
 
-#include <algorithm>
 #include <atomic>
 #include <cstdint>
 #include <map>
 #include <memory>
 #include <set>
 #include <utility>
 #include <vector>
 
 class BlockValidationState;
 class CBlockIndex;
 class CBlockTreeDB;
 class CBlockUndo;
 class CChainParams;
 class CChain;
 class CConnman;
 class CInv;
 class Config;
 class CScriptCheck;
 class CTxMemPool;
 class CTxUndo;
 class DisconnectedBlockTransactions;
 class TxValidationState;
 
 struct ChainTxData;
 struct FlatFilePos;
 struct PrecomputedTransactionData;
 struct LockPoints;
 
 namespace Consensus {
 struct Params;
 }
 
 #define MIN_TRANSACTION_SIZE                                                   \
     (::GetSerializeSize(CTransaction(), PROTOCOL_VERSION))
 
 /** Default for -minrelaytxfee, minimum relay fee for transactions */
 static const Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 * SATOSHI);
 /** Default for -excessutxocharge for transactions */
 static const Amount DEFAULT_UTXO_FEE = Amount::zero();
 /**
  * Default for -mempoolexpiry, expiration time for mempool transactions in
  * hours.
  */
 static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
 /** The maximum size of a blk?????.dat file (since 0.8) */
 static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
 
 /** Maximum number of dedicated script-checking threads allowed */
 static const int MAX_SCRIPTCHECK_THREADS = 15;
 /** -par default (number of script-checking threads, 0 = auto) */
 static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
 /**
  * Number of blocks that can be requested at any given time from a single peer.
  */
 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
 /**
  * Timeout in seconds during which a peer must stall block download progress
  * before being disconnected.
  */
 static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
 /**
  * Number of headers sent in one getheaders result. We rely on the assumption
  * that if a peer sends less than this number, we reached its tip. Changing this
  * value is a protocol upgrade.
  */
 static const unsigned int MAX_HEADERS_RESULTS = 2000;
 /**
  * Maximum depth of blocks we're willing to serve as compact blocks to peers
  * when requested. For older blocks, a regular BLOCK response will be sent.
  */
 static const int MAX_CMPCTBLOCK_DEPTH = 5;
 /**
  * Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
  */
 static const int MAX_BLOCKTXN_DEPTH = 10;
 /**
  * Size of the "block download window": how far ahead of our current height do
  * we fetch? Larger windows tolerate larger download speed differences between
  * peers, but increase the potential degree of disordering of blocks on disk
  * (which makes reindexing, and in the future perhaps pruning, harder). We'll
  * probably want to make this a per-peer adaptive value at some point.
  */
 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
 /** Time to wait (in seconds) between writing blocks/block index to disk. */
 static const unsigned int DATABASE_WRITE_INTERVAL = 60 * 60;
 /** Time to wait (in seconds) between flushing chainstate to disk. */
 static const unsigned int DATABASE_FLUSH_INTERVAL = 24 * 60 * 60;
 /** Block download timeout base, expressed in millionths of the block interval
  * (i.e. 10 min) */
 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
 /**
  * Additional block download timeout per parallel downloading peer (i.e. 5 min)
  */
 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
 
 static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;
 /**
  * Maximum age of our tip in seconds for us to be considered current for fee
  * estimation.
  */
 static const int64_t MAX_FEE_ESTIMATION_TIP_AGE = 3 * 60 * 60;
 
 static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
 static const bool DEFAULT_TXINDEX = false;
 static const char *const DEFAULT_BLOCKFILTERINDEX = "0";
 static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;
 
 /** Default for -persistmempool */
 static const bool DEFAULT_PERSIST_MEMPOOL = true;
 /** Default for using fee filter */
 static const bool DEFAULT_FEEFILTER = true;
 
 /**
  * Maximum number of headers to announce when relaying blocks with headers
  * message.
  */
 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
 
 /** Maximum number of unconnecting headers announcements before DoS score */
 static const int MAX_UNCONNECTING_HEADERS = 10;
 
 static const bool DEFAULT_PEERBLOOMFILTERS = true;
 
 /** Default for -stopatheight */
 static const int DEFAULT_STOPATHEIGHT = 0;
 /** Default for -maxreorgdepth */
 static const int DEFAULT_MAX_REORG_DEPTH = 10;
 /**
  * Default for -finalizationdelay
  * This is the minimum time between a block header reception and the block
  * finalization.
  * This value should be >> block propagation and validation time
  */
 static const int64_t DEFAULT_MIN_FINALIZATION_DELAY = 2 * 60 * 60;
 
 extern RecursiveMutex cs_main;
 extern CTxMemPool g_mempool;
 typedef std::unordered_map<BlockHash, CBlockIndex *, BlockHasher> BlockMap;
 extern Mutex g_best_block_mutex;
 extern std::condition_variable g_best_block_cv;
 extern uint256 g_best_block;
 extern std::atomic_bool fImporting;
 extern std::atomic_bool fReindex;
 extern bool fRequireStandard;
 extern bool fCheckBlockIndex;
 extern bool fCheckpointsEnabled;
 extern size_t nCoinCacheUsage;
 
 /**
  * A fee rate smaller than this is considered zero fee (for relaying, mining and
  * transaction creation)
  */
 extern CFeeRate minRelayTxFee;
 /**
  * If the tip is older than this (in seconds), the node is considered to be in
  * initial block download.
  */
 extern int64_t nMaxTipAge;
 
 /**
  * Block hash whose ancestors we will assume to have valid scripts without
  * checking them.
  */
 extern BlockHash hashAssumeValid;
 
 /**
  * Minimum work we will assume exists on some valid chain.
  */
 extern arith_uint256 nMinimumChainWork;
 
 /**
  * Best header we've seen so far (used for getheaders queries' starting points).
  */
 extern CBlockIndex *pindexBestHeader;
 
 /** Pruning-related variables and constants */
 /** True if any block files have ever been pruned. */
 extern bool fHavePruned;
 /** True if we're running in -prune mode. */
 extern bool fPruneMode;
 /** Number of MiB of block files that we're trying to stay below. */
 extern uint64_t nPruneTarget;
 /**
  * Block files containing a block-height within MIN_BLOCKS_TO_KEEP of
  * ::ChainActive().Tip() will not be pruned.
  */
 static const unsigned int MIN_BLOCKS_TO_KEEP = 288;
 /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
 
 static const signed int DEFAULT_CHECKBLOCKS = 6;
 static const unsigned int DEFAULT_CHECKLEVEL = 3;
 
 /**
  * Require that user allocate at least 550 MiB for block & undo files
  * (blk???.dat and rev???.dat)
  * At 1MB per block, 288 blocks = 288MB.
  * Add 15% for Undo data = 331MB
  * Add 20% for Orphan block rate = 397MB
  * We want the low water mark after pruning to be at least 397 MB and since we
  * prune in full block file chunks, we need the high water mark which triggers
  * the prune to be one 128MB block file + added 15% undo data = 147MB greater
  * for a total of 545MB
  * Setting the target to >= 550 MiB will make it likely we can respect the
  * target.
  */
 static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
 
 class BlockValidationOptions {
 private:
     uint64_t excessiveBlockSize;
     bool checkPoW : 1;
     bool checkMerkleRoot : 1;
 
 public:
     // Do full validation by default
     explicit BlockValidationOptions(const Config &config);
     explicit BlockValidationOptions(uint64_t _excessiveBlockSize,
                                     bool _checkPow = true,
                                     bool _checkMerkleRoot = true)
         : excessiveBlockSize(_excessiveBlockSize), checkPoW(_checkPow),
           checkMerkleRoot(_checkMerkleRoot) {}
 
     BlockValidationOptions withCheckPoW(bool _checkPoW = true) const {
         BlockValidationOptions ret = *this;
         ret.checkPoW = _checkPoW;
         return ret;
     }
 
     BlockValidationOptions
     withCheckMerkleRoot(bool _checkMerkleRoot = true) const {
         BlockValidationOptions ret = *this;
         ret.checkMerkleRoot = _checkMerkleRoot;
         return ret;
     }
 
     bool shouldValidatePoW() const { return checkPoW; }
     bool shouldValidateMerkleRoot() const { return checkMerkleRoot; }
     uint64_t getExcessiveBlockSize() const { return excessiveBlockSize; }
 };
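 
 // Illustrative usage sketch (not part of this header): BlockValidationOptions
 // is a small builder-style value type, so callers construct it and chain the
 // with* methods; `config` below is assumed to be a `const Config &` available
 // in the caller.
 //
 //     const BlockValidationOptions opts = BlockValidationOptions(config)
 //                                             .withCheckPoW(false)
 //                                             .withCheckMerkleRoot(false);
 //     assert(!opts.shouldValidatePoW() && !opts.shouldValidateMerkleRoot());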
 
 /**
  * Process an incoming block. This only returns after the best known valid
  * block is made active. Note that it does not, however, guarantee that the
  * specific block passed to it has been checked for validity!
  *
  * If you want to *possibly* get feedback on whether pblock is valid, you must
  * install a CValidationInterface (see validationinterface.h) - this will have
  * its BlockChecked method called whenever *any* block completes validation.
  *
  * Note that we guarantee that either the proof-of-work is valid on pblock, or
  * (and possibly also) BlockChecked will have been called.
  *
  * May not be called in a validationinterface callback.
  *
  * @param[in]   config  The global config.
  * @param[in]   pblock  The block we want to process.
  * @param[in]   fForceProcessing Process this block even if unrequested; used
  * for non-network block sources and whitelisted peers.
  * @param[out]  fNewBlock A boolean which is set to indicate if the block was
  *                        first received via this call.
  * @returns     If the block was processed, independently of block validity
  */
 bool ProcessNewBlock(const Config &config,
                      const std::shared_ptr<const CBlock> pblock,
                      bool fForceProcessing, bool *fNewBlock)
     LOCKS_EXCLUDED(cs_main);
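 
 // Illustrative sketch (not part of this header): ProcessNewBlock's return
 // value only says whether the block was processed, so callers that care about
 // validity register a CValidationInterface (see validationinterface.h) and
 // watch BlockChecked. The exact BlockChecked signature is assumed here.
 //
 //     class BlockCheckedLogger : public CValidationInterface {
 //     protected:
 //         void BlockChecked(const CBlock &block,
 //                           const BlockValidationState &state) override {
 //             // Inspect `state`; called for *any* block completing
 //             // validation, not just the one handed to ProcessNewBlock.
 //         }
 //     };
 //
 //     bool fNewBlock = false;
 //     ProcessNewBlock(config, pblock, /*fForceProcessing=*/true, &fNewBlock);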
 
 /**
  * Process incoming block headers.
  *
  * May not be called in a validationinterface callback.
  *
  * @param[in]  config        The config.
  * @param[in]  block         The block headers themselves.
  * @param[out] state         This may be set to an Error state if any error
  *                           occurred processing them.
  * @param[out] ppindex       If set, the pointer will be set to point to the
  *                           last new block index object for the given headers.
  * @return True if block headers were accepted as valid.
  */
 bool ProcessNewBlockHeaders(const Config &config,
                             const std::vector<CBlockHeader> &block,
                             BlockValidationState &state,
                             const CBlockIndex **ppindex = nullptr)
     LOCKS_EXCLUDED(cs_main);
 
 /**
  * Import blocks from an external file.
  */
 bool LoadExternalBlockFile(const Config &config, FILE *fileIn,
                            FlatFilePos *dbp = nullptr);
 
 /**
  * Ensures we have a genesis block in the block tree, possibly writing one to
  * disk.
  */
 bool LoadGenesisBlock(const CChainParams &chainparams);
 
 /**
  * Load the block tree and coins database from disk, initializing state if we're
  * running with -reindex.
  */
 bool LoadBlockIndex(const Consensus::Params &params)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Unload database information.
  */
 void UnloadBlockIndex();
 
 /**
  * Run an instance of the script checking thread.
  */
 void ThreadScriptCheck(int worker_num);
 
 /**
  * Retrieve a transaction (from memory pool, or from disk, if possible).
  */
 bool GetTransaction(const TxId &txid, CTransactionRef &txOut,
                     const Consensus::Params &params, BlockHash &hashBlock,
                     const CBlockIndex *const blockIndex = nullptr);
 
 /**
  * Find the best known block, and make it the tip of the block chain
  *
  * May not be called with cs_main held. May not be called in a
  * validationinterface callback.
  */
 bool ActivateBestChain(
     const Config &config, BlockValidationState &state,
     std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>());
 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams);
 
 /**
  * Guess verification progress (as a fraction between 0.0=genesis and
  * 1.0=current tip).
  */
 double GuessVerificationProgress(const ChainTxData &data,
                                  const CBlockIndex *pindex);
 
 /**
  * Calculate the amount of disk space the block & undo files currently use.
  */
 uint64_t CalculateCurrentUsage();
 
 /**
  * Mark one block file as pruned.
  */
 void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Actually unlink the specified files
  */
 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune);
 
 /** Prune block files up to a given height */
 void PruneBlockFilesManual(int nManualPruneHeight);
 
 /**
  * (try to) add transaction to memory pool
  */
 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
                         TxValidationState &state, const CTransactionRef &tx,
                         bool bypass_limits, const Amount nAbsurdFee,
                         bool test_accept = false)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Simple class for regulating resource usage during CheckInputs (and
  * CScriptCheck), atomic so as to be compatible with parallel validation.
  */
 class CheckInputsLimiter {
 protected:
     std::atomic<int64_t> remaining;
 
 public:
     explicit CheckInputsLimiter(int64_t limit) : remaining(limit) {}
 
     bool consume_and_check(int consumed) {
         auto newvalue = (remaining -= consumed);
         return newvalue >= 0;
     }
 
     bool check() { return remaining >= 0; }
 };
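 
 // Illustrative sketch (not part of this header): a limiter is handed a budget
 // up front and each unit of work charges against it; once the budget goes
 // negative, consume_and_check() returns false and the caller aborts
 // (MoreWorkToDo/DoOneUnit below are placeholders for caller code).
 //
 //     CheckInputsLimiter limiter(/*limit=*/100);
 //     while (MoreWorkToDo()) {
 //         if (!limiter.consume_and_check(1)) {
 //             return false; // budget exhausted
 //         }
 //         DoOneUnit();
 //     }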
 
 class TxSigCheckLimiter : public CheckInputsLimiter {
 public:
     TxSigCheckLimiter() : CheckInputsLimiter(MAX_TX_SIGCHECKS) {}
 
     // Let's make this bad boy copiable.
     TxSigCheckLimiter(const TxSigCheckLimiter &rhs)
         : CheckInputsLimiter(rhs.remaining.load()) {}
 
     TxSigCheckLimiter &operator=(const TxSigCheckLimiter &rhs) {
         remaining = rhs.remaining.load();
         return *this;
     }
 
     static TxSigCheckLimiter getDisabled() {
         TxSigCheckLimiter txLimiter;
         // Historically, there has not been a transaction with more than 20k sig
         // checks on testnet or mainnet, so this effectively disables sigchecks.
         txLimiter.remaining = 20000;
         return txLimiter;
     }
 };
 
 class ConnectTrace;
 
 /**
  * Check whether all inputs of this transaction are valid (no double spends,
  * scripts & sigs, amounts). This does not modify the UTXO set.
  *
  * If pvChecks is not nullptr, script checks are pushed onto it instead of being
  * performed inline. Any script checks which are not necessary (e.g. due to
  * script execution cache hits) are, obviously, neither pushed onto pvChecks
  * nor run.
  *
  * Upon success nSigChecksOut will be filled in with either:
  * - correct total for all inputs, or,
  * - 0, in the case when checks were pushed onto pvChecks (i.e., a cache miss
  * with pvChecks non-null), in which case the total can be found by executing
  * pvChecks and adding the results.
  *
  * Setting sigCacheStore/scriptCacheStore to false will remove elements from the
  * corresponding cache which are matched. This is useful for checking blocks
  * where we will likely never need the cache entry again.
  *
  * pLimitSigChecks can be passed to limit the sigchecks count either in parallel
  * or serial validation. With pvChecks null (serial validation), breaking the
  * pLimitSigChecks limit will abort evaluation early and return false. With
  * pvChecks not-null (parallel validation): the cached nSigChecks may itself
  * break the limit in which case false is returned, OR, each entry in the
  * returned pvChecks must be executed exactly once in order to probe the limit
  * accurately.
  */
 bool CheckInputs(const CTransaction &tx, TxValidationState &state,
                  const CCoinsViewCache &view, const uint32_t flags,
                  bool sigCacheStore, bool scriptCacheStore,
                  const PrecomputedTransactionData &txdata, int &nSigChecksOut,
                  TxSigCheckLimiter &txLimitSigChecks,
                  CheckInputsLimiter *pBlockLimitSigChecks,
                  std::vector<CScriptCheck> *pvChecks)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Handy shortcut to full fledged CheckInputs call.
  */
 static inline bool
 CheckInputs(const CTransaction &tx, TxValidationState &state,
             const CCoinsViewCache &view, const uint32_t flags,
             bool sigCacheStore, bool scriptCacheStore,
             const PrecomputedTransactionData &txdata, int &nSigChecksOut)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     TxSigCheckLimiter nSigChecksTxLimiter;
     return CheckInputs(tx, state, view, flags, sigCacheStore, scriptCacheStore,
                        txdata, nSigChecksOut, nSigChecksTxLimiter, nullptr,
                        nullptr);
 }
 
 /** Get the BIP9 state for a given deployment at the current tip. */
 ThresholdState VersionBitsTipState(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /** Get the BIP9 state for a given deployment at a given block. */
 ThresholdState VersionBitsBlockState(const Consensus::Params &params,
                                      Consensus::DeploymentPos pos,
                                      const CBlockIndex *pindex);
 
 /**
  * Get the numerical statistics for the BIP9 state for a given deployment at the
  * current tip.
  */
 BIP9Stats VersionBitsTipStatistics(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /**
  * Get the block height at which the BIP9 deployment switched into the state for
  * the block building on the current tip.
  */
 int VersionBitsTipStateSinceHeight(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /** Apply the effects of this transaction on the UTXO set represented by view */
 void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs, int nHeight);
 
 /**
  * Mark all the coins corresponding to a given transaction inputs as spent.
  */
 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight);
 
 /**
  * Apply the effects of this transaction on the UTXO set represented by view.
  */
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight);
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                  int nHeight);
 
 /**
  * Test whether the LockPoints height and time are still valid on the current
  * chain.
  */
 bool TestLockPointValidity(const LockPoints *lp)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Check if transaction will be BIP 68 final in the next block to be created.
  *
  * Simulates calling SequenceLocks() with data from the tip of the current
  * active chain. Optionally stores in LockPoints the resulting height and time
  * calculated and the hash of the block needed for calculation or skips the
  * calculation and uses the LockPoints passed in for evaluation. The LockPoints
  * should not be considered valid if CheckSequenceLocks returns false.
  *
  * See consensus/consensus.h for flag definitions.
  */
 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
                         int flags, LockPoints *lp = nullptr,
                         bool useExistingLockPoints = false)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
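 
 // Illustrative sketch (not part of this header): typical mempool-side use of
 // CheckSequenceLocks(). STANDARD_LOCKTIME_VERIFY_FLAGS is assumed to come
 // from policy/policy.h, and `tx` is assumed to be a transaction in the
 // caller's scope.
 //
 //     LockPoints lp;
 //     {
 //         LOCK(cs_main);
 //         if (!CheckSequenceLocks(g_mempool, tx,
 //                                 STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
 //             return false; // not BIP 68 final for the next block yet
 //         }
 //     }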
 
 /**
  * Closure representing one script verification.
  * Note that this stores references to the spending transaction.
  *
  * Note that if a sigchecks limiter is passed, then failure does not imply
  * that scripts have failed.
  */
 class CScriptCheck {
 private:
     CTxOut m_tx_out;
     const CTransaction *ptxTo;
     unsigned int nIn;
     uint32_t nFlags;
     bool cacheStore;
     ScriptError error;
     ScriptExecutionMetrics metrics;
     PrecomputedTransactionData txdata;
     TxSigCheckLimiter *pTxLimitSigChecks;
     CheckInputsLimiter *pBlockLimitSigChecks;
 
 public:
     CScriptCheck()
         : ptxTo(nullptr), nIn(0), nFlags(0), cacheStore(false),
           error(ScriptError::UNKNOWN), txdata(), pTxLimitSigChecks(nullptr),
           pBlockLimitSigChecks(nullptr) {}
 
     CScriptCheck(const CTxOut &outIn, const CTransaction &txToIn,
                  unsigned int nInIn, uint32_t nFlagsIn, bool cacheIn,
                  const PrecomputedTransactionData &txdataIn,
                  TxSigCheckLimiter *pTxLimitSigChecksIn = nullptr,
                  CheckInputsLimiter *pBlockLimitSigChecksIn = nullptr)
         : m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn),
           cacheStore(cacheIn), error(ScriptError::UNKNOWN), txdata(txdataIn),
           pTxLimitSigChecks(pTxLimitSigChecksIn),
           pBlockLimitSigChecks(pBlockLimitSigChecksIn) {}
 
     bool operator()();
 
     void swap(CScriptCheck &check) {
         std::swap(ptxTo, check.ptxTo);
         std::swap(m_tx_out, check.m_tx_out);
         std::swap(nIn, check.nIn);
         std::swap(nFlags, check.nFlags);
         std::swap(cacheStore, check.cacheStore);
         std::swap(error, check.error);
         std::swap(metrics, check.metrics);
         std::swap(txdata, check.txdata);
         std::swap(pTxLimitSigChecks, check.pTxLimitSigChecks);
         std::swap(pBlockLimitSigChecks, check.pBlockLimitSigChecks);
     }
 
     ScriptError GetScriptError() const { return error; }
 
     ScriptExecutionMetrics GetScriptExecutionMetrics() const { return metrics; }
 };
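 
 // Illustrative sketch (not part of this header): when CheckInputs() above is
 // given a non-null pvChecks, the per-input work is deferred into CScriptCheck
 // objects; running each check once and summing its metrics recovers the
 // sigchecks total (this assumes ScriptExecutionMetrics exposes an nSigChecks
 // field, per script_metrics.h; tx, state, view, flags, txdata and the
 // limiters are assumed caller values).
 //
 //     std::vector<CScriptCheck> vChecks;
 //     int nSigChecksOut = 0;
 //     if (!CheckInputs(tx, state, view, flags, /*sigCacheStore=*/true,
 //                      /*scriptCacheStore=*/true, txdata, nSigChecksOut,
 //                      txLimiter, &blockLimiter, &vChecks)) {
 //         return false; // e.g. the cached sigchecks already broke a limit
 //     }
 //     for (CScriptCheck &check : vChecks) {
 //         if (!check()) return false; // script invalid or a limiter tripped
 //         nSigChecksOut += check.GetScriptExecutionMetrics().nSigChecks;
 //     }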
 
 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex);
 
 /** Functions for validating blocks and updating the block tree */
 
 /**
  * Context-independent validity checks.
  *
  * Returns true if the provided block is valid (has valid header,
  * transactions are valid, block is a valid size, etc.)
  */
 bool CheckBlock(const CBlock &block, BlockValidationState &state,
                 const Consensus::Params &params,
                 BlockValidationOptions validationOptions);
 
 /**
  * This is a variant of ContextualCheckTransaction which computes the contextual
  * check for a transaction based on the chain tip.
  *
  * See consensus/consensus.h for flag definitions.
  */
 bool ContextualCheckTransactionForCurrentBlock(const Consensus::Params &params,
                                                const CTransaction &tx,
                                                TxValidationState &state,
                                                int flags = -1);
 
 /**
  * Check a block is completely valid from start to finish (only works on top of
  * our current best block)
  */
 bool TestBlockValidity(BlockValidationState &state, const CChainParams &params,
                        const CBlock &block, CBlockIndex *pindexPrev,
                        BlockValidationOptions validationOptions)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * RAII wrapper for VerifyDB: Verify consistency of the block and coin
  * databases.
  */
 class CVerifyDB {
 public:
     CVerifyDB();
     ~CVerifyDB();
     bool VerifyDB(const Config &config, CCoinsView *coinsview, int nCheckLevel,
                   int nCheckDepth);
 };
 
 CBlockIndex *LookupBlockIndex(const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Find the last common block between the parameter chain and a locator. */
 CBlockIndex *FindForkInGlobalIndex(const CChain &chain,
                                    const CBlockLocator &locator)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** @see CChainState::FlushStateToDisk */
 enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };
 
 /** Global variable that points to the active CCoinsView (protected by cs_main)
  */
 extern std::unique_ptr<CCoinsViewCache> pcoinsTip;
 
 /**
  * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
  * to determine where the most-work tip is.
  *
  * This data is used mostly in `CChainState` - information about, e.g.,
  * candidate tips is not maintained here.
  */
 class BlockManager {
 public:
     BlockMap m_block_index GUARDED_BY(cs_main);
 
     /**
      * In order to efficiently track invalidity of headers, we keep the set of
      * blocks which we tried to connect and found to be invalid here (ie which
      * were set to BLOCK_FAILED_VALID since the last restart). We can then
      * walk this set and check if a new header is a descendant of something in
      * this set, preventing us from having to walk m_block_index when we try
      * to connect a bad block and fail.
      *
      * While this is more complicated than marking everything which descends
      * from an invalid block as invalid at the time we discover it to be
      * invalid, doing so would require walking all of m_block_index to find all
      * descendants. Since this case should be very rare, keeping track of all
      * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
      * well.
      *
      * Because we already walk m_block_index in height-order at startup, we go
      * ahead and mark descendants of invalid blocks as FAILED_CHILD at that
      * time, instead of putting things in this set.
      */
     std::set<CBlockIndex *> m_failed_blocks;
 
     /**
      * All pairs A->B, where A (or one of its ancestors) misses transactions,
      * but B has transactions. Pruned nodes may have entries where B is missing
      * data.
      */
     std::multimap<CBlockIndex *, CBlockIndex *> m_blocks_unlinked;
 
     /**
      * Load the blocktree off disk and into memory. Populate certain metadata
      * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as
      * peripheral collections like setDirtyBlockIndex.
      *
      * @param[out] block_index_candidates  Fill this set with any valid blocks
      * for which we've downloaded all transactions.
      */
     bool LoadBlockIndex(const Consensus::Params &consensus_params,
                         CBlockTreeDB &blocktree,
                         std::set<CBlockIndex *, CBlockIndexWorkComparator>
                             &block_index_candidates)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Clear all data members. */
     void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     CBlockIndex *AddToBlockIndex(const CBlockHeader &block)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Create a new block index entry for a given block hash */
     CBlockIndex *InsertBlockIndex(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * If a block header hasn't already been seen, call CheckBlockHeader on it,
      * ensure that it doesn't descend from an invalid block, and then add it to
      * m_block_index.
      */
     bool AcceptBlockHeader(const Config &config, const CBlockHeader &block,
                            BlockValidationState &state, CBlockIndex **ppindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 };
 
 /**
  * A convenience class for constructing the CCoinsView* hierarchy used
  * to facilitate access to the UTXO set.
  *
  * This class consists of an arrangement of layered CCoinsView objects,
  * preferring to store and retrieve coins in memory via `m_cacheview` but
  * ultimately falling back on cache misses to the canonical store of UTXOs on
  * disk, `m_dbview`.
  */
 class CoinsViews {
 public:
     //! The lowest level of the CoinsViews cache hierarchy sits in a leveldb
     //! database on disk. All unspent coins reside in this store.
     CCoinsViewDB m_dbview GUARDED_BY(cs_main);
 
     //! This view wraps access to the leveldb instance and handles read errors
     //! gracefully.
     CCoinsViewErrorCatcher m_catcherview GUARDED_BY(cs_main);
 
     //! This is the top layer of the cache hierarchy - it keeps as many coins in
     //! memory as can fit per the dbcache setting.
     std::unique_ptr<CCoinsViewCache> m_cacheview GUARDED_BY(cs_main);
 
     //! This constructor initializes CCoinsViewDB and CCoinsViewErrorCatcher
     //! instances, but it *does not* create a CCoinsViewCache instance by
     //! default. This is done separately because the presence of the cache has
     //! implications on whether or not we're allowed to flush the cache's state
     //! to disk, which should not be done until the health of the database is
     //! verified.
     //!
     //! All arguments forwarded onto CCoinsViewDB.
     CoinsViews(std::string ldb_name, size_t cache_size_bytes, bool in_memory,
                bool should_wipe);
 
     //! Initialize the CCoinsViewCache member.
     void InitCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 };
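 
 // Illustrative sketch (not part of this header): CoinsViews is brought up in
 // two phases, matching the constructor comment above; the in-memory cache is
 // only created once the on-disk database has been verified (the cache size
 // below is an arbitrary example).
 //
 //     CoinsViews views("chainstate", /*cache_size_bytes=*/8 << 20,
 //                      /*in_memory=*/false, /*should_wipe=*/fReindex);
 //     // ... check the health of views.m_dbview (e.g. via CVerifyDB) ...
 //     {
 //         LOCK(::cs_main);
 //         views.InitCache();
 //     }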
 
 /**
  * CChainState stores and provides an API to update our local knowledge of the
  * current best chain.
  *
  * Eventually, the API here is targeted at being exposed externally as a
  * consumable libconsensus library, so any functions added must only call
  * other class member functions, pure functions in other parts of the consensus
  * library, callbacks via the validation interface, or read/write-to-disk
  * functions (eventually this will also be via callbacks).
  *
  * Anything that is contingent on the current tip of the chain is stored here,
  * whereas block information and metadata independent of the current tip are
  * kept in `BlockManager`.
  */
 class CChainState {
 private:
     /**
      * the ChainState CriticalSection
      * A lock that must be held when modifying this ChainState - held in
      * ActivateBestChain()
      */
     RecursiveMutex m_cs_chainstate;
 
     /**
      * Every received block is assigned a unique and increasing identifier, so
      * we know which one to give priority in case of a fork.
      * Blocks loaded from disk are assigned id 0, so start the counter at 1.
      */
     std::atomic<int32_t> nBlockSequenceId{1};
     /** Decreasing counter (used by subsequent preciousblock calls). */
     int32_t nBlockReverseSequenceId = -1;
     /** chainwork for the last block that preciousblock has been applied to. */
     arith_uint256 nLastPreciousChainwork = 0;
 
     /**
      * Whether this chainstate is undergoing initial block download.
      *
      * Mutable because we need to be able to mark IsInitialBlockDownload()
      * const, which latches this for caching purposes.
      */
     mutable std::atomic<bool> m_cached_finished_ibd{false};
 
     //! Reference to a BlockManager instance which itself is shared across all
     //! CChainState instances. Keeping a local reference allows us to test more
     //! easily as opposed to referencing a global.
     BlockManager &m_blockman;
 
     //! Manages the UTXO set, which is a reflection of the contents of
     //! `m_chain`.
     std::unique_ptr<CoinsViews> m_coins_views;
 
     /**
      * The best finalized block.
      * This block cannot be reorged in any way except by explicit user action.
      */
     const CBlockIndex *m_finalizedBlockIndex GUARDED_BY(cs_main) = nullptr;
 
 public:
     CChainState(BlockManager &blockman) : m_blockman(blockman) {}
     CChainState();
 
     /**
      * Initialize the CoinsViews UTXO set database management data structures.
      * The in-memory cache is initialized separately.
      *
      * All parameters forwarded to CoinsViews.
      */
     void InitCoinsDB(size_t cache_size_bytes, bool in_memory, bool should_wipe,
                      std::string leveldb_name = "chainstate");
 
     //! Initialize the in-memory coins cache (to be done after the health of the
     //! on-disk database is verified).
     void InitCoinsCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! @returns whether or not the CoinsViews object has been fully
     //! initialized and we can safely flush this object to disk.
     bool CanFlushToDisk() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
         return m_coins_views && m_coins_views->m_cacheview;
     }
 
     //! The current chain of blockheaders we consult and build on.
     //! @see CChain, CBlockIndex.
     CChain m_chain;
     /**
      * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for
      * itself and all ancestors) and as good as our current tip or better.
      * Entries may be failed, though, and pruning nodes may be missing the data
      * for the block.
      */
     std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
 
     //! @returns A reference to the in-memory cache of the UTXO set.
     CCoinsViewCache &CoinsTip() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
         assert(m_coins_views->m_cacheview);
         return *m_coins_views->m_cacheview.get();
     }
 
     //! @returns A reference to the on-disk UTXO set database.
     CCoinsViewDB &CoinsDB() { return m_coins_views->m_dbview; }
 
     //! @returns A reference to a wrapped view of the in-memory UTXO set that
     //!     handles disk read errors gracefully.
     CCoinsViewErrorCatcher &CoinsErrorCatcher()
         EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
         return m_coins_views->m_catcherview;
     }
 
     //! Destructs all objects related to accessing the UTXO set.
     void ResetCoinsViews() { m_coins_views.reset(); }
 
     /**
      * Update the on-disk chain state.
      * The caches and indexes are flushed depending on the mode we're called
      * with if they're too large, if it's been a while since the last write, or
      * always and in all cases if we're in prune mode and are deleting files.
      *
      * If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do
      * anything besides checking if we need to prune.
      *
      * @returns true unless a system error occurred
      */
     bool FlushStateToDisk(const CChainParams &chainparams,
                           BlockValidationState &state, FlushStateMode mode,
                           int nManualPruneHeight = 0);
 
     //! Unconditionally flush all changes to disk.
     void ForceFlushStateToDisk();
 
     //! Prune blockfiles from the disk if necessary and then flush chainstate
     //! changes if we pruned.
     void PruneAndFlush();
 
     /**
      * Make the best chain active, in multiple steps. The result is either
      * failure or an activated best chain. pblock is either nullptr or a pointer
      * to a block that is already loaded (to avoid loading it again from disk).
      *
      * ActivateBestChain is split into steps (see ActivateBestChainStep) so that
      * we avoid holding cs_main for an extended period of time; the length of
      * this call may be quite long during reindexing or a substantial reorg.
      *
      * May not be called with cs_main held. May not be called in a
      * validationinterface callback.
      *
      * @returns true unless a system error occurred
      */
     bool ActivateBestChain(
         const Config &config, BlockValidationState &state,
         std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>())
         LOCKS_EXCLUDED(cs_main);
 
     bool AcceptBlock(const Config &config,
                      const std::shared_ptr<const CBlock> &pblock,
                      BlockValidationState &state, bool fRequested,
                      const FlatFilePos *dbp, bool *fNewBlock)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Block (dis)connection on a given view:
     DisconnectResult DisconnectBlock(const CBlock &block,
                                      const CBlockIndex *pindex,
                                      CCoinsViewCache &view);
     bool ConnectBlock(const CBlock &block, BlockValidationState &state,
                       CBlockIndex *pindex, CCoinsViewCache &view,
                       const CChainParams &params,
                       BlockValidationOptions options, bool fJustCheck = false)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Block disconnection on our pcoinsTip:
     bool DisconnectTip(const CChainParams &params, BlockValidationState &state,
                        DisconnectedBlockTransactions *disconnectpool)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
 
     // Manual block validity manipulation:
     bool PreciousBlock(const Config &config, BlockValidationState &state,
                        CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main);
     /** Mark a block as invalid. */
     bool InvalidateBlock(const Config &config, BlockValidationState &state,
                          CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
     /** Park a block. */
     bool ParkBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
 
     /**
      * Finalize a block.
      * A finalized block can not be reorged in any way.
      */
     bool FinalizeBlock(const Config &config, BlockValidationState &state,
                        CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
     /** Return the currently finalized block index. */
     const CBlockIndex *GetFinalizedBlock() const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /**
      * Checks if a block is finalized.
      */
     bool IsBlockFinalized(const CBlockIndex *pindex) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     void ResetBlockFailureFlags(CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F>
     bool UpdateFlagsForBlock(CBlockIndex *pindexBase, CBlockIndex *pindex, F f)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F, typename C, typename AC>
     void UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset, F f,
                      C fChild, AC fAncestorWasChanged)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Remove parked status from a block and its descendants. */
     void UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Replay blocks that aren't fully applied to the database. */
     bool ReplayBlocks(const Consensus::Params &params);
     bool LoadGenesisBlock(const CChainParams &chainparams);
 
     void PruneBlockIndexCandidates();
 
     void UnloadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Check whether we are doing an initial block download (synchronizing from
      * disk or network)
      */
     bool IsInitialBlockDownload() const;
 
     /**
      * Make various assertions about the state of the block index.
      *
      * By default this only executes fully when using the Regtest chain; see:
      * fCheckBlockIndex.
      */
     void CheckBlockIndex(const Consensus::Params &consensusParams);
 
     /** Update the chain tip based on database information, i.e. CoinsTip()'s
      * best block. */
     bool LoadChainTip(const CChainParams &chainparams)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 private:
     bool ActivateBestChainStep(const Config &config,
                                BlockValidationState &state,
                                CBlockIndex *pindexMostWork,
                                const std::shared_ptr<const CBlock> &pblock,
                                bool &fInvalidFound, ConnectTrace &connectTrace)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
     bool ConnectTip(const Config &config, BlockValidationState &state,
                     CBlockIndex *pindexNew,
                     const std::shared_ptr<const CBlock> &pblock,
                     ConnectTrace &connectTrace,
                     DisconnectedBlockTransactions &disconnectpool)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
     void InvalidBlockFound(CBlockIndex *pindex,
                            const BlockValidationState &state)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void InvalidChainFound(CBlockIndex *pindexNew)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     CBlockIndex *FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool MarkBlockAsFinal(const Config &config, BlockValidationState &state,
                           const CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void ReceivedBlockTransactions(const CBlock &block, CBlockIndex *pindexNew,
                                    const FlatFilePos &pos)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &inputs,
                           const Consensus::Params &params)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool UnwindBlock(const Config &config, BlockValidationState &state,
                      CBlockIndex *pindex, bool invalidate)
         EXCLUSIVE_LOCKS_REQUIRED(m_cs_chainstate);
 };
 
 /**
  * Mark a block as precious and reorganize.
  *
  * May not be called in a validationinterface callback.
  */
 bool PreciousBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main);
 
 /** Remove invalidity status from a block and its descendants. */
 void ResetBlockFailureFlags(CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Remove parked status from a block and its descendants. */
 void UnparkBlockAndChildren(CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Remove parked status from a block. */
 void UnparkBlock(CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** @returns the most-work valid chainstate. */
 CChainState &ChainstateActive();
 
 /** @returns the most-work chain. */
 CChain &ChainActive();
 
 /** @returns the global block index map. */
 BlockMap &BlockIndex();
 
 // Most often ::ChainstateActive() should be used instead of this, but some code
 // may not be able to assume that this has been initialized yet and so must use
 // it directly, e.g. init.cpp.
 extern std::unique_ptr<CChainState> g_chainstate;
 
 /**
  * Global variable that points to the active block tree (protected by cs_main)
  */
 extern std::unique_ptr<CBlockTreeDB> pblocktree;
 
 /**
  * Return the spend height, which is one more than the height of the block
  * referenced by inputs.GetBestBlock(). While checking, GetBestBlock() refers
  * to the parent block. (protected by cs_main)
  * This is also true for mempool checks.
  */
 int GetSpendHeight(const CCoinsViewCache &inputs);
 
 /**
  * Determine what nVersion a new block should use.
  */
 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
                             const Consensus::Params &params);
 
 /** Get block file info entry for one block file */
 CBlockFileInfo *GetBlockFileInfo(size_t n);
 
 /** Dump the mempool to disk. */
 bool DumpMempool(const CTxMemPool &pool);
 
 /** Load the mempool from disk. */
 bool LoadMempool(const Config &config, CTxMemPool &pool);
 
 //! Check whether the block associated with this index entry is pruned or not.
 bool IsBlockPruned(const CBlockIndex *pblockindex);
 
 #endif // BITCOIN_VALIDATION_H