diff --git a/contrib/teamcity/build-configurations.sh b/contrib/teamcity/build-configurations.sh
index 084968b91..257856471 100755
--- a/contrib/teamcity/build-configurations.sh
+++ b/contrib/teamcity/build-configurations.sh
@@ -1,186 +1,187 @@
#!/usr/bin/env bash

export LC_ALL=C.UTF-8

set -euxo pipefail

: "${ABC_BUILD_NAME:=""}"
if [ -z "$ABC_BUILD_NAME" ]; then
  echo "Error: Environment variable ABC_BUILD_NAME must be set"
  exit 1
fi

echo "Running build configuration '${ABC_BUILD_NAME}'..."

TOPLEVEL=$(git rev-parse --show-toplevel)
export TOPLEVEL

setup() {
  : "${BUILD_DIR:=${TOPLEVEL}/build}"
  mkdir -p "${BUILD_DIR}/output"
  BUILD_DIR=$(cd "${BUILD_DIR}"; pwd)
  export BUILD_DIR

  TEST_RUNNER_FLAGS="--tmpdirprefix=output"

  cd "${BUILD_DIR}"

  # Determine the number of build threads
  THREADS=$(nproc || sysctl -n hw.ncpu)
  export THREADS

  # Base directories for sanitizer related files
  SAN_SUPP_DIR="${TOPLEVEL}/test/sanitizer_suppressions"
  SAN_LOG_DIR="/tmp/sanitizer_logs"

  # Create the log directory if it doesn't exist and clear it
  mkdir -p "${SAN_LOG_DIR}"
  rm -rf "${SAN_LOG_DIR:?}"/*

  # Sanitizers options, not used if sanitizers are not enabled
  export ASAN_OPTIONS="malloc_context_size=0:log_path=${SAN_LOG_DIR}/asan.log"
  export LSAN_OPTIONS="suppressions=${SAN_SUPP_DIR}/lsan:log_path=${SAN_LOG_DIR}/lsan.log"
  export TSAN_OPTIONS="suppressions=${SAN_SUPP_DIR}/tsan:log_path=${SAN_LOG_DIR}/tsan.log"
  export UBSAN_OPTIONS="suppressions=${SAN_SUPP_DIR}/ubsan:print_stacktrace=1:halt_on_error=1:log_path=${SAN_LOG_DIR}/ubsan.log"
}

# Facility to print out sanitizer log outputs to the build log console
print_sanitizers_log() {
  for log in "${SAN_LOG_DIR}"/*.log.*
  do
    echo "*** Output of ${log} ***"
    cat "${log}"
  done
}

trap "print_sanitizers_log" ERR

CI_SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

setup

case "$ABC_BUILD_NAME" in
  build-asan)
    # Build with the address sanitizer, then run unit tests and functional tests.
    CMAKE_FLAGS=(
+      "-DCMAKE_CXX_FLAGS=-DARENA_DEBUG"
      "-DCMAKE_BUILD_TYPE=Debug"
      # ASAN does not support assembly code: https://github.com/google/sanitizers/issues/192
      # This will trigger a segfault if the SSE4 implementation is selected for SHA256.
      # Disabling the assembly works around the issue.
      "-DCRYPTO_USE_ASM=OFF"
      "-DENABLE_SANITIZERS=address"
      "-DCCACHE=OFF"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check

    # FIXME Remove when wallet_multiwallet works with asan after backporting at
    # least the following PRs from Core and their dependencies:
    # 13161, 12493, 14320, 14552, 14760, 11911.
    TEST_RUNNER_FLAGS="${TEST_RUNNER_FLAGS} --exclude=wallet_multiwallet"
    ./test/functional/test_runner.py ${TEST_RUNNER_FLAGS}
    ;;

  build-ubsan)
    # Build with the undefined sanitizer, then run unit tests and functional tests.
    CMAKE_FLAGS=(
      "-DCMAKE_BUILD_TYPE=Debug"
      "-DENABLE_SANITIZERS=undefined"
      "-DCCACHE=OFF"
      "-DCMAKE_C_COMPILER=clang"
      "-DCMAKE_CXX_COMPILER=clang++"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check

    # FIXME Remove when abc-p2p-compactblocks works with ubsan.
    TEST_RUNNER_FLAGS="${TEST_RUNNER_FLAGS} --exclude=abc-p2p-compactblocks"
    ./test/functional/test_runner.py ${TEST_RUNNER_FLAGS}
    ;;

  build-tsan)
    # Build with the thread sanitizer, then run unit tests and functional tests.
    CMAKE_FLAGS=(
      "-DCMAKE_BUILD_TYPE=Debug"
      "-DENABLE_SANITIZERS=thread"
      "-DCCACHE=OFF"
      "-DCMAKE_C_COMPILER=clang"
      "-DCMAKE_CXX_COMPILER=clang++"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check

    # FIXME Remove when wallet_multiwallet works with tsan after backporting at
    # least the following PRs from Core and their dependencies:
    # 13161, 12493, 14320, 14552, 14760, 11911.
    TEST_RUNNER_FLAGS="${TEST_RUNNER_FLAGS} --exclude=wallet_multiwallet"
    ./test/functional/test_runner.py ${TEST_RUNNER_FLAGS}
    ;;

  build-default)
    # Build, run unit tests and functional tests (all extended tests if this is the master branch).
    CMAKE_FLAGS=(
      "-DSECP256K1_ENABLE_MODULE_ECDH=ON"
      "-DSECP256K1_ENABLE_JNI=ON"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check check-secp256k1

    BRANCH=$(git rev-parse --abbrev-ref HEAD)
    if [[ "${BRANCH}" == "master" ]]; then
      TEST_RUNNER_FLAGS="${TEST_RUNNER_FLAGS} --extended"
    fi
    ./test/functional/test_runner.py ${TEST_RUNNER_FLAGS}
    ./test/functional/test_runner.py -J=junit_results_next_upgrade.xml --with-phononactivation ${TEST_RUNNER_FLAGS}
    ;;

  build-without-wallet)
    # Build without wallet and run the unit tests.
    CMAKE_FLAGS=(
      "-DBUILD_BITCOIN_WALLET=OFF"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check
    ;;

  build-ibd)
    "${CI_SCRIPTS_DIR}"/build_cmake.sh
    "${CI_SCRIPTS_DIR}"/ibd.sh -disablewallet -debug=net
    ;;

  build-ibd-no-assumevalid-checkpoint)
    "${CI_SCRIPTS_DIR}"/build_cmake.sh
    "${CI_SCRIPTS_DIR}"/ibd.sh -disablewallet -assumevalid=0 -checkpoints=0 -debug=net
    ;;

  build-werror)
    # Build with variable-length-array and thread-safety-analysis treated as errors
    CMAKE_FLAGS=(
      "-DENABLE_WERROR=ON"
      "-DCMAKE_C_COMPILER=clang"
      "-DCMAKE_CXX_COMPILER=clang++"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ;;

  build-check-all)
    CMAKE_FLAGS=(
      "-DSECP256K1_ENABLE_MODULE_ECDH=ON"
      "-DSECP256K1_ENABLE_JNI=ON"
    )
    CMAKE_FLAGS="${CMAKE_FLAGS[*]}" "${CI_SCRIPTS_DIR}"/build_cmake.sh
    ninja check-all
    ;;

  build-autotools)
    # Ensure that the build using autotools is not broken
    "${CI_SCRIPTS_DIR}"/build_autotools.sh
    make -j "${THREADS}" check
    ;;

  check-seeds-mainnet)
    "${CI_SCRIPTS_DIR}"/build_cmake.sh
    "${CI_SCRIPTS_DIR}"/check-seeds.sh main 80
    ;;

  check-seeds-testnet)
    "${CI_SCRIPTS_DIR}"/build_cmake.sh
    "${CI_SCRIPTS_DIR}"/check-seeds.sh test 70
    ;;

  *)
    echo "Error: Invalid build name '${ABC_BUILD_NAME}'"
    exit 2
    ;;
esac
diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
index a6cd81909..49d161300 100644
--- a/src/bench/lockedpool.cpp
+++ b/src/bench/lockedpool.cpp
@@ -1,44 +1,44 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>

#include <support/lockedpool.h>

#include <iostream>
#include <vector>

#define ASIZE 2048
#define BITER 5000
#define MSIZE 2048

static void BenchLockedPool(benchmark::State &state) {
    void *synth_base = reinterpret_cast<void *>(0x08000000);
    const size_t synth_size = 1024 * 1024;
    Arena b(synth_base, synth_size, 16);

    std::vector<void *> addr;
    for (int x = 0; x < ASIZE; ++x) addr.push_back(nullptr);
    uint32_t s = 0x12345678;
    while (state.KeepRunning()) {
        for (int x = 0; x < BITER; ++x) {
            int idx = s & (addr.size() - 1);
            if (s & 0x80000000) {
                b.free(addr[idx]);
                addr[idx] = nullptr;
            } else if (!addr[idx]) {
                addr[idx] = b.alloc((s >> 16) & (MSIZE - 1));
            }
            bool lsb = s & 1;
            s >>= 1;
            if (lsb) s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
        }
    }
    for (void *ptr : addr) b.free(ptr);
    addr.clear();
}

-BENCHMARK(BenchLockedPool, 530);
+BENCHMARK(BenchLockedPool, 1300);
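
If I read the bench framework right, the second argument to BENCHMARK is the number of iterations the runner expects to complete in one second, so raising it from 530 to 1300 re-tunes the harness for the faster Arena rather than changing what is measured. For reference, here is a standalone sketch of the same LFSR-driven workload outside the bench harness; this is my own illustration (hypothetical file, not part of the patch), assuming an in-tree build so that support/lockedpool.h resolves:

    // arena_workload_sketch.cpp (hypothetical) -- same mixed alloc/free pattern
    // as BenchLockedPool, run once, finished with an Arena::stats() printout.
    #include <support/lockedpool.h>

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // Arena only does pointer arithmetic on the region it manages, so a
        // synthetic, never-dereferenced base address is enough to exercise it.
        void *synth_base = reinterpret_cast<void *>(0x08000000);
        Arena a(synth_base, 1024 * 1024, 16);

        std::vector<void *> addr(2048, nullptr); // slot count stays a power of two
        uint32_t s = 0x12345678;                 // LFSR state, same seed as the bench
        for (int x = 0; x < 5000; ++x) {
            // Low bits pick a slot; the mask works because 2048 is 2^11.
            size_t idx = s & (addr.size() - 1);
            if (s & 0x80000000) {
                a.free(addr[idx]); // freeing nullptr is a no-op
                addr[idx] = nullptr;
            } else if (!addr[idx]) {
                addr[idx] = a.alloc((s >> 16) & 2047); // request 0..2047 bytes
            }
            bool lsb = s & 1;
            s >>= 1;
            if (lsb) s ^= 0xf00f00f0; // Galois LFSR step, period 0xf7ffffe0
        }

        Arena::Stats st = a.stats();
        std::printf("used=%zu free=%zu chunks_used=%zu chunks_free=%zu\n",
                    st.used, st.free, st.chunks_used, st.chunks_free);

        for (void *ptr : addr) a.free(ptr);
        return 0;
    }

The synthetic 0x08000000 base is safe because Arena never dereferences the region it manages; all bookkeeping lives in side maps, which is also why the pool can keep its administrative data outside the scarce locked memory.
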
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 076b5b223..3d5f21ae4 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -1,389 +1,409 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <support/lockedpool.h>
#include <support/cleanse.h>

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <climits>        // for PAGESIZE
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#include <unistd.h>       // for sysconf
#endif

#include <algorithm>
#include <limits>
+#ifdef ARENA_DEBUG
+#include <iomanip>
+#include <iostream>
+#endif

LockedPoolManager *LockedPoolManager::_instance = nullptr;
std::once_flag LockedPoolManager::init_flag;

/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align) {
    return (x + align - 1) & ~(align - 1);
}

/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in)
    : base(static_cast<char *>(base_in)),
      end(static_cast<char *>(base_in) + size_in), alignment(alignment_in) {
    // Start with one free chunk that covers the entire arena
-    chunks_free.emplace(base, size_in);
+    auto it = size_to_free_chunk.emplace(size_in, base);
+    chunks_free.emplace(base, it);
+    chunks_free_end.emplace(base + size_in, it);
}

Arena::~Arena() {}

void *Arena::alloc(size_t size) {
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0) {
        return nullptr;
    }

-    // Pick a large enough free-chunk
-    auto it =
-        std::find_if(chunks_free.begin(), chunks_free.end(),
-                     [=](const std::map<char *, size_t>::value_type &chunk) {
-                         return chunk.second >= size;
-                     });
-    if (it == chunks_free.end()) {
+    // Pick a large enough free-chunk. Returns an iterator pointing to the
+    // first element that is not less than key. This allocation strategy is
+    // best-fit. According to "Dynamic Storage Allocation: A Survey and
+    // Critical Review", Wilson et al. 1995,
+    // http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf,
+    // best-fit and first-fit policies seem to work well in practice.
+    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
+    if (size_ptr_it == size_to_free_chunk.end()) {
        return nullptr;
    }

    // Create the used-chunk, taking its space from the end of the free-chunk
+    const size_t size_remaining = size_ptr_it->first - size;
    auto alloced =
-        chunks_used.emplace(it->first + it->second - size, size).first;
-    if (!(it->second -= size)) {
-        chunks_free.erase(it);
+        chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
+    chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
+    if (size_ptr_it->first == size) {
+        // whole chunk is used up
+        chunks_free.erase(size_ptr_it->second);
+    } else {
+        // still some memory left in the chunk
+        auto it_remaining =
+            size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
+        chunks_free[size_ptr_it->second] = it_remaining;
+        chunks_free_end.emplace(size_ptr_it->second + size_remaining,
+                                it_remaining);
    }
-    return reinterpret_cast<void *>(alloced->first);
-}
+    size_to_free_chunk.erase(size_ptr_it);

-/* extend the Iterator if other begins at its end */
-template <class Iterator, class Pair>
-bool extend(Iterator it, const Pair &other) {
-    if (it->first + it->second == other.first) {
-        it->second += other.second;
-        return true;
-    }
-    return false;
+    return reinterpret_cast<void *>(alloced->first);
}

void Arena::free(void *ptr) {
    // Freeing the nullptr pointer is OK.
    if (ptr == nullptr) {
        return;
    }

    // Remove chunk from used map
    auto i = chunks_used.find(static_cast<char *>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
-    auto freed = *i;
+    std::pair<char *, size_t> freed = *i;
    chunks_used.erase(i);

-    // Add space to free map, coalescing contiguous chunks
-    auto next = chunks_free.upper_bound(freed.first);
-    auto prev =
-        (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
-    if (prev == chunks_free.end() || !extend(prev, freed)) {
-        prev = chunks_free.emplace_hint(next, freed);
+    // coalesce freed with previous chunk
+    auto prev = chunks_free_end.find(freed.first);
+    if (prev != chunks_free_end.end()) {
+        freed.first -= prev->second->first;
+        freed.second += prev->second->first;
+        size_to_free_chunk.erase(prev->second);
+        chunks_free_end.erase(prev);
    }
-    if (next != chunks_free.end() && extend(prev, *next)) {
+
+    // coalesce freed with chunk after freed
+    auto next = chunks_free.find(freed.first + freed.second);
+    if (next != chunks_free.end()) {
+        freed.second += next->second->first;
+        size_to_free_chunk.erase(next->second);
        chunks_free.erase(next);
    }
+
+    // Add/set space with coalesced free chunk
+    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
+    chunks_free[freed.first] = it;
+    chunks_free_end[freed.first + freed.second] = it;
}

Arena::Stats Arena::stats() const {
    Arena::Stats r{0, 0, 0, chunks_used.size(), chunks_free.size()};
    for (const auto &chunk : chunks_used) {
        r.used += chunk.second;
    }
    for (const auto &chunk : chunks_free) {
-        r.free += chunk.second;
+        r.free += chunk.second->first;
    }
    r.total = r.used + r.free;
    return r;
}

#ifdef ARENA_DEBUG
-static void printchunk(char *base, size_t sz, bool used) {
+static void printchunk(void *base, size_t sz, bool used) {
    std::cout << "0x" << std::hex << std::setw(16) << std::setfill('0') << base
              << " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz
              << " 0x" << used << std::endl;
}
void Arena::walk() const {
    for (const auto &chunk : chunks_used) {
        printchunk(chunk.first, chunk.second, true);
    }
    std::cout << std::endl;
    for (const auto &chunk : chunks_free) {
-        printchunk(chunk.first, chunk.second, false);
+        printchunk(chunk.first, chunk.second->first, false);
    }
    std::cout << std::endl;
}
#endif

/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/**
 * LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator : public LockedPageAllocator {
public:
    Win32LockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator() {
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}

void *Win32LockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    len = align_up(len, page_size);
    void *addr =
        VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap.
        // Note that it does not provide this as a guarantee, but, in practice,
        // memory that has been VirtualLock'd almost never gets written to the
        // pagefile except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void *>(addr), len) != 0;
    }
    return addr;
}

void Win32LockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void *>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit() {
    // TODO is there a limit on Windows, how to get it?
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/**
 * LockedPageAllocator specialized for OSes that don't try to be special
 * snowflakes.
 */
class PosixLockedPageAllocator : public LockedPageAllocator {
public:
    PosixLockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator() {
    // Determine system page size in bytes
#if defined(PAGESIZE) // defined in climits
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define
// MAP_ANON which is deprecated
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}

void PosixLockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}

size_t PosixLockedPageAllocator::GetLimit() {
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in,
                       LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in),
      cumulative_bytes_locked(0) {}

LockedPool::~LockedPool() {}

void *LockedPool::alloc(size_t size) {
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE) {
        return nullptr;
    }

    // Try allocating from each current arena
    for (auto &arena : arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}

void LockedPool::free(void *ptr) {
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena : arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error(
        "LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const {
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena : arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align) {
    bool locked;
    // If this is the first arena, handle this specially: Cap the upper size by
    // the process limit. This makes sure that the first arena will at least be
    // locked. An exception to this is if the process limit is 0: in this case
    // no memory can be locked at all so we'll skip past this logic.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) {
        // Call the locking-failed callback if locking failed
        if (!lf_cb()) {
            // If the callback returns false, free the memory and fail,
            // otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in,
                                             void *base_in, size_t size_in,
                                             size_t align_in)
    : Arena(base_in, size_in, align_in), base(base_in), size(size_in),
      allocator(allocator_in) {}

LockedPool::LockedPageArena::~LockedPageArena() {
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager
//

LockedPoolManager::LockedPoolManager(
    std::unique_ptr<LockedPageAllocator> allocator_in)
    : LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed) {}

bool LockedPoolManager::LockingFailed() {
    // TODO: log something but how? without including util.h
    return true;
}

void LockedPoolManager::CreateInstance() {
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that use
    // it are done with it. I can think of one unlikely scenario where we may
    // have a static deinitialization order/problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(
        new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(
        new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
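
Since the substance of this change is the new free-chunk bookkeeping, here is a toy, self-contained version of the three maps and the invariant that ties them together. The names (FreeChunks, by_size, by_begin, by_end) are mine rather than the patch's, and the real Arena's used-chunk tracking and error handling are deliberately left out:

    #include <cstddef>
    #include <map>
    #include <unordered_map>

    // Invariant: every free chunk [p, p + sz) owns exactly one node in by_size,
    // and both by_begin[p] and by_end[p + sz] point at that node.
    struct FreeChunks {
        using BySize = std::multimap<size_t, char *>; // size -> chunk begin
        BySize by_size;
        std::unordered_map<char *, BySize::const_iterator> by_begin;
        std::unordered_map<char *, BySize::const_iterator> by_end;

        void insert(char *p, size_t sz) {
            auto it = by_size.emplace(sz, p);
            by_begin[p] = it;
            by_end[p + sz] = it;
        }
        void erase(BySize::const_iterator it) {
            by_begin.erase(it->second);
            by_end.erase(it->second + it->first);
            by_size.erase(it);
        }
        // Best fit in O(log n): the smallest free chunk whose size is >= size.
        char *take(size_t size) {
            auto it = by_size.lower_bound(size);
            if (it == by_size.end()) return nullptr;
            char *begin = it->second;
            size_t remaining = it->first - size;
            erase(it);
            // As in the patch, the used piece is carved from the end of the chunk.
            if (remaining) insert(begin, remaining);
            return begin + remaining;
        }
        // Return [p, p + sz) to the free list, merging with both neighbours.
        void give_back(char *p, size_t sz) {
            auto prev = by_end.find(p); // a free chunk ending exactly at p?
            if (prev != by_end.end()) {
                p -= prev->second->first;
                sz += prev->second->first;
                erase(prev->second);
            }
            auto next = by_begin.find(p + sz); // a free chunk starting at p + sz?
            if (next != by_begin.end()) {
                sz += next->second->first;
                erase(next->second);
            }
            insert(p, sz);
        }
    };

With this layout, alloc() costs one lower_bound() in the size-sorted multimap plus a few expected-O(1) hash-map updates, and free() finds its neighbours directly by address instead of the removed linear find_if over an address-sorted map; the real Arena additionally keeps chunks_used so that free() can reject invalid or double frees.
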
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index 89220866f..512977a9c 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -1,245 +1,254 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <cstddef>
#include <list>
#include <map>
#include <memory>
#include <mutex>
+#include <unordered_map>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator {
public:
    virtual ~LockedPageAllocator() {}

    /**
     * Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns 0 in case of allocation failure.
     *
     * If locking the memory pages could not be accomplished it will still
     * return the memory, however the lockingSuccess flag will be false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void *AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /**
     * Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void *addr, size_t len) = 0;

    /**
     * Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};

/**
 * An arena manages a contiguous region of memory by dividing it into chunks.
 */
class Arena {
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena &other) = delete;       // non construction-copyable
    Arena &operator=(const Arena &) = delete; // non copyable

    /** Memory statistics. */
    struct Stats {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /**
     * Allocate size bytes from this arena.
     * Returns pointer on success, or 0 if memory is full or the application
     * tried to allocate 0 bytes.
     */
    void *alloc(size_t size);

    /**
     * Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /**
     * Return whether a pointer points inside this arena.
     * This returns base <= ptr < (base+size) so only use it for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }

private:
-    /**
-     * Map of chunk address to chunk information. This class makes use of the
-     * sorted order to merge previous and next chunks during deallocation.
-     */
-    std::map<char *, size_t> chunks_free;
-    std::map<char *, size_t> chunks_used;
+    typedef std::multimap<size_t, char *> SizeToChunkSortedMap;
+    /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
+    SizeToChunkSortedMap size_to_free_chunk;
+
+    typedef std::unordered_map<char *, SizeToChunkSortedMap::const_iterator>
+        ChunkToSizeMap;
+    /** Map from begin of free chunk to its node in size_to_free_chunk */
+    ChunkToSizeMap chunks_free;
+    /** Map from end of free chunk to its node in size_to_free_chunk */
+    ChunkToSizeMap chunks_free_end;
+
+    /** Map from begin of used chunk to its size */
+    std::unordered_map<char *, size_t> chunks_used;
+
    /** Base address of arena */
    char *base;
    /** End address of arena */
    char *end;
    /** Minimum chunk alignment */
    size_t alignment;
};

/**
 * Pool for locked memory chunks.
 *
 * To avoid sensitive key data from being swapped to disk, the memory in this
 * pool is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one
 * arena but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are separate from the
 * managed memory. This has been done as the sizes and bases of objects are not
 * in themselves sensitive information, as to conserve precious locked memory.
 * In some operating systems the amount of memory that can be locked is small.
 */
class LockedPool {
public:
    /**
     * Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256 * 1024;
    /**
     * Chunk alignment. Another compromise. Setting this too high will waste
     * memory, setting it too low will facilitate fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /**
     * Callback when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /**
     * Create a new LockedPool. This takes ownership of the MemoryPageLocker,
     * you can only instantiate this with LockedPool(std::move(...)).
     *
     * The second argument is an optional callback when locking a newly
     * allocated arena failed. If this callback is provided and returns false,
     * the allocation fails (hard fail), if it returns true the allocation
     * proceeds, but it could warn.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator,
                        LockingFailed_Callback lf_cb_in = nullptr);
    ~LockedPool();

    LockedPool(const LockedPool &other) = delete; // non construction-copyable
    LockedPool &operator=(const LockedPool &) = delete; // non copyable

    /**
     * Allocate size bytes from this arena.
     * Returns pointer on success, or 0 if memory is full or the application
     * tried to allocate 0 bytes.
     */
    void *alloc(size_t size);

    /**
     * Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;

private:
    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena : public Arena {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in,
                        size_t size, size_t align);
        ~LockedPageArena();

    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /**
     * Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};

/**
 * Singleton class to keep track of locked (ie, non-swappable) memory, for use
 * in std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (i.e.,
 * see MSVC's vector implementation where it allocates 1 byte of memory in
 * the allocator). Due to the unpredictable order of static initializers, we
 * have to make sure the LockedPoolManager instance exists before any other
 * STL-based objects that use secure_allocator are created. So instead of
 * having LockedPoolManager also be static-initialized, it is created on
 * demand.
 */
class LockedPoolManager : public LockedPool {
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager &Instance() {
        std::call_once(LockedPoolManager::init_flag,
                       LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails, warn the user here */
    static bool LockingFailed();

    static LockedPoolManager *_instance;
    static std::once_flag init_flag;
};

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
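
For context on how the pool is reached at runtime, a minimal usage sketch — again my own illustration assuming an in-tree build; elsewhere in the tree the usual caller is secure_allocator rather than direct calls like these:

    #include <support/lockedpool.h>

    #include <cstdio>
    #include <cstring>

    int main() {
        LockedPool &pool = LockedPoolManager::Instance();

        // Requests come out of 256 KiB locked arenas and are rounded up to the
        // 16-byte ARENA_ALIGN: align_up(72, 16) == (72 + 15) & ~15 == 80.
        void *key = pool.alloc(72);
        if (!key) return 1; // nullptr on exhaustion or for size 0 / > ARENA_SIZE
        std::memset(key, 0xAB, 72);

        LockedPool::Stats st = pool.stats();
        // For this lone request, used reports the aligned size (80).
        std::printf("used=%zu locked=%zu\n", st.used, st.locked);

        // Arena::free only updates bookkeeping; the pages themselves are wiped
        // by FreeLocked when an arena is torn down, and callers such as
        // secure_allocator cleanse their own buffers before freeing.
        pool.free(key);
        return 0;
    }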