diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
index 813869a13..01273c979 100644
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -1,390 +1,385 @@
 // Copyright (c) 2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "support/lockedpool.h"
 #include "support/cleanse.h"
 
 #if defined(HAVE_CONFIG_H)
 #include "config/bitcoin-config.h"
 #endif
 
 #ifdef WIN32
 #ifdef _WIN32_WINNT
 #undef _WIN32_WINNT
 #endif
 #define _WIN32_WINNT 0x0501
 #define WIN32_LEAN_AND_MEAN 1
 #ifndef NOMINMAX
 #define NOMINMAX
 #endif
 #include <windows.h>
 #else
 #include <sys/mman.h> // for mmap
 #include <sys/resource.h> // for getrlimit
 #include <limits.h> // for PAGESIZE
 #include <unistd.h> // for sysconf
 #endif
 
+#include <algorithm>
+
 LockedPoolManager* LockedPoolManager::_instance = NULL;
 std::once_flag LockedPoolManager::init_flag;
 
 /*******************************************************************************/
 // Utilities
 //
 /** Align up to power of 2 */
 static inline size_t align_up(size_t x, size_t align)
 {
     return (x + align - 1) & ~(align - 1);
 }
 
 /*******************************************************************************/
 // Implementation: Arena
 
 Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
     base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
 {
     // Start with one free chunk that covers the entire arena
-    chunks.emplace(base, Chunk(size_in, false));
+    chunks_free.emplace(base, size_in);
 }
 
 Arena::~Arena()
 {
 }
 
 void* Arena::alloc(size_t size)
 {
     // Round to next multiple of alignment
     size = align_up(size, alignment);
 
-    // Don't handle zero-sized chunks, or those bigger than MAX_SIZE
-    if (size == 0 || size >= Chunk::MAX_SIZE) {
+    // Don't handle zero-sized chunks
+    if (size == 0)
         return nullptr;
-    }
 
-    for (auto& chunk: chunks) {
-        if (!chunk.second.isInUse() && size <= chunk.second.getSize()) {
-            char* _base = chunk.first;
-            size_t leftover = chunk.second.getSize() - size;
-            if (leftover > 0) { // Split chunk
-                chunks.emplace(_base + size, Chunk(leftover, false));
-                chunk.second.setSize(size);
-            }
-            chunk.second.setInUse(true);
-            return reinterpret_cast<void*>(_base);
-        }
+    // Pick a large enough free-chunk
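+    // (first-fit: linear scan of the address-ordered free map for a chunk that is big enough)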
+    auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
+        [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
+    if (it == chunks_free.end())
+        return nullptr;
+
+    // Create the used-chunk, taking its space from the end of the free-chunk
+    auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
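+    // Shrink the free chunk in place; if nothing is left of it, drop it from the free map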
+    if (!(it->second -= size))
+        chunks_free.erase(it);
+    return reinterpret_cast<void*>(alloced->first);
+}
+
+/* Extend the chunk at 'it' to absorb 'other', if 'other' begins exactly at its end */
+template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
+    if (it->first + it->second == other.first) {
+        it->second += other.second;
+        return true;
     }
-    return nullptr;
+    return false;
 }
 
 void Arena::free(void *ptr)
 {
     // Freeing the NULL pointer is OK.
     if (ptr == nullptr) {
         return;
     }
-    auto i = chunks.find(static_cast<char*>(ptr));
-    if (i == chunks.end() || !i->second.isInUse()) {
-        throw std::runtime_error("Arena: invalid or double free");
-    }
 
-    i->second.setInUse(false);
-
-    if (i != chunks.begin()) { // Absorb into previous chunk if exists and free
-        auto prev = i;
-        --prev;
-        if (!prev->second.isInUse()) {
-            // Absorb current chunk size into previous chunk.
-            prev->second.setSize(prev->second.getSize() + i->second.getSize());
-            // Erase current chunk. Erasing does not invalidate current
-            // iterators for a map, except for that pointing to the object
-            // itself, which will be overwritten in the next statement.
-            chunks.erase(i);
-            // From here on, the previous chunk is our current chunk.
-            i = prev;
-        }
-    }
-    auto next = i;
-    ++next;
-    if (next != chunks.end()) { // Absorb next chunk if exists and free
-        if (!next->second.isInUse()) {
-            // Absurb next chunk size into current chunk
-            i->second.setSize(i->second.getSize() + next->second.getSize());
-            // Erase next chunk.
-            chunks.erase(next);
-        }
+    // Remove chunk from used map
+    auto i = chunks_used.find(static_cast<char*>(ptr));
+    if (i == chunks_used.end()) {
+        throw std::runtime_error("Arena: invalid or double free");
     }
+    auto freed = *i;
+    chunks_used.erase(i);
+
+    // Add space to free map, coalescing contiguous chunks
+    auto next = chunks_free.upper_bound(freed.first);
+    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
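+    // Grow the preceding free chunk to cover the freed space if it is adjacent; otherwise insert a new free chunk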
+    if (prev == chunks_free.end() || !extend(prev, freed))
+        prev = chunks_free.emplace_hint(next, freed);
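+    // If the next free chunk starts exactly where the (possibly merged) chunk ends, absorb it as well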
+    if (next != chunks_free.end() && extend(prev, *next))
+        chunks_free.erase(next);
 }
 
 Arena::Stats Arena::stats() const
 {
-    Arena::Stats r;
-    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
-    for (const auto& chunk: chunks) {
-        if (chunk.second.isInUse()) {
-            r.used += chunk.second.getSize();
-            r.chunks_used += 1;
-        } else {
-            r.free += chunk.second.getSize();
-            r.chunks_free += 1;
-        }
-        r.total += chunk.second.getSize();
-    }
+    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
+    for (const auto& chunk: chunks_used)
+        r.used += chunk.second;
+    for (const auto& chunk: chunks_free)
+        r.free += chunk.second;
+    r.total = r.used + r.free;
     return r;
 }
 
 #ifdef ARENA_DEBUG
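+/** Print one chunk's base address, size and in-use flag (ARENA_DEBUG builds only). */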
+static void printchunk(void* base, size_t sz, bool used) {
+    std::cout <<
+        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
+        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
+        " 0x" << used << std::endl;
+}
 void Arena::walk() const
 {
-    for (const auto& chunk: chunks) {
-        std::cout <<
-            "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first <<
-            " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() <<
-            " 0x" << chunk.second.isInUse() << std::endl;
-    }
+    for (const auto& chunk: chunks_used)
+        printchunk(chunk.first, chunk.second, true);
+    std::cout << std::endl;
+    for (const auto& chunk: chunks_free)
+        printchunk(chunk.first, chunk.second, false);
     std::cout << std::endl;
 }
 #endif
 
 /*******************************************************************************/
 // Implementation: Win32LockedPageAllocator
 
 #ifdef WIN32
 /** LockedPageAllocator specialized for Windows.
  */
 class Win32LockedPageAllocator: public LockedPageAllocator
 {
 public:
     Win32LockedPageAllocator();
     void* AllocateLocked(size_t len, bool *lockingSuccess);
     void FreeLocked(void* addr, size_t len);
     size_t GetLimit();
 private:
     size_t page_size;
 };
 
 Win32LockedPageAllocator::Win32LockedPageAllocator()
 {
     // Determine system page size in bytes
     SYSTEM_INFO sSysInfo;
     GetSystemInfo(&sSysInfo);
     page_size = sSysInfo.dwPageSize;
 }
 void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
 {
     len = align_up(len, page_size);
     void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
     if (addr) {
         // VirtualLock is used to attempt to keep keying material out of swap. Note
         // that it does not provide this as a guarantee, but, in practice, memory
         // that has been VirtualLock'd almost never gets written to the pagefile
         // except in rare circumstances where memory is extremely low.
         *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
     }
     return addr;
 }
 void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
 {
     len = align_up(len, page_size);
     memory_cleanse(addr, len);
     VirtualUnlock(const_cast<void*>(addr), len);
 }
 
 size_t Win32LockedPageAllocator::GetLimit()
 {
     // TODO is there a limit on windows, how to get it?
     return std::numeric_limits<size_t>::max();
 }
 #endif
 
 /*******************************************************************************/
 // Implementation: PosixLockedPageAllocator
 
 #ifndef WIN32
 /** LockedPageAllocator specialized for OSes that don't try to be
  * special snowflakes.
  */
 class PosixLockedPageAllocator: public LockedPageAllocator
 {
 public:
     PosixLockedPageAllocator();
     void* AllocateLocked(size_t len, bool *lockingSuccess);
     void FreeLocked(void* addr, size_t len);
     size_t GetLimit();
 private:
     size_t page_size;
 };
 
 PosixLockedPageAllocator::PosixLockedPageAllocator()
 {
     // Determine system page size in bytes
 #if defined(PAGESIZE) // defined in limits.h
     page_size = PAGESIZE;
 #else                   // assume some POSIX OS
     page_size = sysconf(_SC_PAGESIZE);
 #endif
 }
 
 // Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define
 // MAP_ANON which is deprecated
 #ifndef MAP_ANONYMOUS
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
 void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
 {
     void *addr;
     len = align_up(len, page_size);
     addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     if (addr) {
         *lockingSuccess = mlock(addr, len) == 0;
     }
     return addr;
 }
 void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
 {
     len = align_up(len, page_size);
     memory_cleanse(addr, len);
     munlock(addr, len);
     munmap(addr, len);
 }
 size_t PosixLockedPageAllocator::GetLimit()
 {
 #ifdef RLIMIT_MEMLOCK
     struct rlimit rlim;
     if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
         if (rlim.rlim_cur != RLIM_INFINITY) {
             return rlim.rlim_cur;
         }
     }
 #endif
     return std::numeric_limits<size_t>::max();
 }
 #endif
 
 /*******************************************************************************/
 // Implementation: LockedPool
 
 LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
     allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
 {
 }
 
 LockedPool::~LockedPool()
 {
 }
 void* LockedPool::alloc(size_t size)
 {
     std::lock_guard<std::mutex> lock(mutex);
+
+    // Don't handle impossible sizes
+    if (size == 0 || size > ARENA_SIZE)
+        return nullptr;
+
     // Try allocating from each current arena
     for (auto &arena: arenas) {
         void *addr = arena.alloc(size);
         if (addr) {
             return addr;
         }
     }
     // If that fails, create a new one
     if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
         return arenas.back().alloc(size);
     }
     return nullptr;
 }
 
 void LockedPool::free(void *ptr)
 {
     std::lock_guard<std::mutex> lock(mutex);
     // TODO we can do better than this linear search by keeping a map of arena
     // extents to arena, and looking up the address.
     for (auto &arena: arenas) {
         if (arena.addressInArena(ptr)) {
             arena.free(ptr);
             return;
         }
     }
     throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
 }
 
 LockedPool::Stats LockedPool::stats() const
 {
     std::lock_guard<std::mutex> lock(mutex);
-    LockedPool::Stats r;
-    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
-    r.locked = cumulative_bytes_locked;
+    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
     for (const auto &arena: arenas) {
         Arena::Stats i = arena.stats();
         r.used += i.used;
         r.free += i.free;
         r.total += i.total;
         r.chunks_used += i.chunks_used;
         r.chunks_free += i.chunks_free;
     }
     return r;
 }
 
 bool LockedPool::new_arena(size_t size, size_t align)
 {
     bool locked;
     // If this is the first arena, handle this specially: Cap the upper size
     // by the process limit. This makes sure that the first arena will at least
     // be locked. An exception to this is if the process limit is 0:
     // in this case no memory can be locked at all so we'll skip past this logic.
     if (arenas.empty()) {
         size_t limit = allocator->GetLimit();
         if (limit > 0) {
             size = std::min(size, limit);
         }
     }
     void *addr = allocator->AllocateLocked(size, &locked);
     if (!addr) {
         return false;
     }
     if (locked) {
         cumulative_bytes_locked += size;
     } else if (lf_cb) { // Call the locking-failed callback if locking failed
         if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
             allocator->FreeLocked(addr, size);
             return false;
         }
     }
     arenas.emplace_back(allocator.get(), addr, size, align);
     return true;
 }
 
 LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
     Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
 {
 }
 LockedPool::LockedPageArena::~LockedPageArena()
 {
     allocator->FreeLocked(base, size);
 }
 
 /*******************************************************************************/
 // Implementation: LockedPoolManager
 //
 LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator):
     LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed)
 {
 }
 
 bool LockedPoolManager::LockingFailed()
 {
     // TODO: log something but how? without including util.h
     return true;
 }
 
 void LockedPoolManager::CreateInstance()
 {
     // Using a local static instance guarantees that the object is initialized
     // when it's first needed and also deinitialized after all objects that use
     // it are done with it.  I can think of one unlikely scenario where we may
     // have a static deinitialization order/problem, but the check in
     // LockedPoolManagerBase's destructor helps us detect if that ever happens.
 #ifdef WIN32
     std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
 #else
     std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
 #endif
     static LockedPoolManager instance(std::move(allocator));
     LockedPoolManager::_instance = &instance;
 }
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
index 526c17a73..340341543 100644
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -1,251 +1,231 @@
 // Copyright (c) 2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
 #define BITCOIN_SUPPORT_LOCKEDPOOL_H
 
 #include <stdint.h>
 #include <list>
 #include <map>
 #include <mutex>
 #include <memory>
 
 /**
  * OS-dependent allocation and deallocation of locked/pinned memory pages.
  * Abstract base class.
  */
 class LockedPageAllocator
 {
 public:
     virtual ~LockedPageAllocator() {}
     /** Allocate and lock memory pages.
      * If len is not a multiple of the system page size, it is rounded up.
      * Returns 0 in case of allocation failure.
      *
      * If locking the memory pages could not be accomplished, it will still
      * return the memory; however, the lockingSuccess flag will be false.
      * lockingSuccess is undefined if the allocation fails.
      */
     virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;
 
     /** Unlock and free memory pages.
      * Clear the memory before unlocking.
      */
     virtual void FreeLocked(void* addr, size_t len) = 0;
 
     /** Get the total limit on the amount of memory that may be locked by this
      * process, in bytes. Return size_t max if there is no limit or the limit
      * is unknown. Return 0 if no memory can be locked at all.
      */
     virtual size_t GetLimit() = 0;
 };
 
 /* An arena manages a contiguous region of memory by dividing it into
  * chunks.
  */
 class Arena
 {
 public:
     Arena(void *base, size_t size, size_t alignment);
     virtual ~Arena();
 
-    /** A chunk of memory.
-     */
-    struct Chunk
-    {
-        /** Most significant bit of size_t. This is used to mark
-         * in-usedness of chunk.
-         */
-        const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1);
-        /** Maximum size of a chunk */
-        const static size_t MAX_SIZE = SIZE_MSB - 1;
-
-        Chunk(size_t size_in, bool used_in):
-            size(size_in | (used_in ? SIZE_MSB : 0)) {}
-
-        bool isInUse() const { return size & SIZE_MSB; }
-        void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); }
-        size_t getSize() const { return size & ~SIZE_MSB; }
-        void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; }
-    private:
-        size_t size;
-    };
     /** Memory statistics. */
     struct Stats
     {
         size_t used;
         size_t free;
         size_t total;
         size_t chunks_used;
         size_t chunks_free;
     };
 
     /** Allocate size bytes from this arena.
      * Returns pointer on success, or 0 if memory is full or
      * the application tried to allocate 0 bytes.
      */
     void* alloc(size_t size);
 
     /** Free a previously allocated chunk of memory.
      * Freeing the zero pointer has no effect.
      * Raises std::runtime_error in case of error.
      */
     void free(void *ptr);
 
     /** Get arena usage statistics */
     Stats stats() const;
 
 #ifdef ARENA_DEBUG
     void walk() const;
 #endif
 
     /** Return whether a pointer points inside this arena.
      * This returns base <= ptr < (base+size) so only use it for (inclusive)
      * chunk starting addresses.
      */
     bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
 private:
     Arena(const Arena& other) = delete; // non construction-copyable
     Arena& operator=(const Arena&) = delete; // non copyable
 
     /** Maps of chunk address to chunk size, one for free and one for used chunks.
      * The sorted order is used to merge a freed chunk with adjacent free chunks during deallocation.
      */
-    std::map<char*, Chunk> chunks;
+    std::map<char*, size_t> chunks_free;
+    std::map<char*, size_t> chunks_used;
     /** Base address of arena */
     char* base;
     /** End address of arena */
     char* end;
     /** Minimum chunk alignment */
     size_t alignment;
 };
 
 /** Pool for locked memory chunks.
  *
  * To avoid sensitive key data from being swapped to disk, the memory in this pool
  * is locked/pinned.
  *
  * An arena manages a contiguous region of memory. The pool starts out with one arena
  * but can grow to multiple arenas if the need arises.
  *
  * Unlike a normal C heap, the administrative structures are separate from the managed
  * memory. This is done to conserve precious locked memory: the sizes and base addresses
  * of objects are not in themselves sensitive information. In some operating systems
  * the amount of memory that can be locked is small.
  */
 class LockedPool
 {
 public:
     /** Size of one arena of locked memory. This is a compromise.
      * Do not set this too low, as managing many arenas will increase
      * allocation and deallocation overhead. Setting it too high allocates
      * more locked memory from the OS than strictly necessary.
      */
     static const size_t ARENA_SIZE = 256*1024;
     /** Chunk alignment. Another compromise. Setting this too high will waste
      * memory, setting it too low will facilitate fragmentation.
      */
     static const size_t ARENA_ALIGN = 16;
 
     /** Callback when allocation succeeds but locking fails.
      */
     typedef bool (*LockingFailed_Callback)();
 
     /** Memory statistics. */
     struct Stats
     {
         size_t used;
         size_t free;
         size_t total;
         size_t locked;
         size_t chunks_used;
         size_t chunks_free;
     };
 
     /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
      * you can only instantiate this with LockedPool(std::move(...)).
      *
      * The second argument is an optional callback for when locking a newly allocated arena fails.
      * If this callback is provided and returns false, the allocation fails (hard fail); if
      * it returns true, the allocation proceeds, but it could warn.
      */
     LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
     ~LockedPool();
 
     /** Allocate size bytes from this arena.
      * Returns pointer on success, or 0 if memory is full or
      * the application tried to allocate 0 bytes.
      */
     void* alloc(size_t size);
 
     /** Free a previously allocated chunk of memory.
      * Freeing the zero pointer has no effect.
      * Raises std::runtime_error in case of error.
      */
     void free(void *ptr);
 
     /** Get pool usage statistics */
     Stats stats() const;
 private:
     LockedPool(const LockedPool& other) = delete; // non construction-copyable
     LockedPool& operator=(const LockedPool&) = delete; // non copyable
 
     std::unique_ptr<LockedPageAllocator> allocator;
 
     /** Create an arena from locked pages */
     class LockedPageArena: public Arena
     {
     public:
         LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
         ~LockedPageArena();
     private:
         void *base;
         size_t size;
         LockedPageAllocator *allocator;
     };
 
     bool new_arena(size_t size, size_t align);
 
     std::list<LockedPageArena> arenas;
     LockingFailed_Callback lf_cb;
     size_t cumulative_bytes_locked;
     /** Mutex protects access to this pool's data structures, including arenas.
      */
     mutable std::mutex mutex;
 };
 
 /**
  * Singleton class to keep track of locked (ie, non-swappable) memory, for use in
  * std::allocator templates.
  *
  * Some implementations of the STL allocate memory in some constructors (i.e., see
  * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
  * Due to the unpredictable order of static initializers, we have to make sure the
  * LockedPoolManager instance exists before any other STL-based objects that use
  * secure_allocator are created. So instead of having LockedPoolManager also be
  * static-initialized, it is created on demand.
  */
 class LockedPoolManager : public LockedPool
 {
 public:
     /** Return the current instance, or create it once */
     static LockedPoolManager& Instance()
     {
         std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
         return *LockedPoolManager::_instance;
     }
 
 private:
     LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);
 
     /** Create a new LockedPoolManager specialized to the OS */
     static void CreateInstance();
     /** Called when locking fails, warn the user here */
     static bool LockingFailed();
 
     static LockedPoolManager* _instance;
     static std::once_flag init_flag;
 };
 
 #endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp
index f0e848655..77e9df5d8 100644
--- a/src/test/allocator_tests.cpp
+++ b/src/test/allocator_tests.cpp
@@ -1,224 +1,234 @@
 // Copyright (c) 2012-2015 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "util.h"
 
 #include "support/allocators/secure.h"
 #include "test/test_bitcoin.h"
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)
 
 BOOST_AUTO_TEST_CASE(arena_tests)
 {
     // Fake memory base address for testing
     // without actually using memory.
     void *synth_base = reinterpret_cast<void*>(0x08000000);
     const size_t synth_size = 1024*1024;
     Arena b(synth_base, synth_size, 16);
     void *chunk = b.alloc(1000);
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     BOOST_CHECK(chunk != nullptr);
     BOOST_CHECK(b.stats().used == 1008); // Aligned to 16
     BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared?
     b.free(chunk);
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     BOOST_CHECK(b.stats().used == 0);
     BOOST_CHECK(b.stats().free == synth_size);
     try { // Test exception on double-free
         b.free(chunk);
         BOOST_CHECK(0);
     } catch(std::runtime_error &)
     {
     }
 
     void *a0 = b.alloc(128);
-    BOOST_CHECK(a0 == synth_base); // first allocation must start at beginning
     void *a1 = b.alloc(256);
     void *a2 = b.alloc(512);
     BOOST_CHECK(b.stats().used == 896);
     BOOST_CHECK(b.stats().total == synth_size);
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     b.free(a0);
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     BOOST_CHECK(b.stats().used == 768);
     b.free(a1);
     BOOST_CHECK(b.stats().used == 512);
     void *a3 = b.alloc(128);
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     BOOST_CHECK(b.stats().used == 640);
     b.free(a2);
     BOOST_CHECK(b.stats().used == 128);
     b.free(a3);
     BOOST_CHECK(b.stats().used == 0);
+    BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
     BOOST_CHECK(b.stats().total == synth_size);
     BOOST_CHECK(b.stats().free == synth_size);
+    BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
 
     std::vector<void*> addr;
     BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
 #ifdef ARENA_DEBUG
     b.walk();
 #endif
     // Sweeping allocate all memory
     for (int x=0; x<1024; ++x)
         addr.push_back(b.alloc(1024));
-    BOOST_CHECK(addr[0] == synth_base); // first allocation must start at beginning
     BOOST_CHECK(b.stats().free == 0);
     BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
     BOOST_CHECK(b.alloc(0) == nullptr);
     for (int x=0; x<1024; ++x)
         b.free(addr[x]);
     addr.clear();
     BOOST_CHECK(b.stats().total == synth_size);
     BOOST_CHECK(b.stats().free == synth_size);
 
     // Now in the other direction...
     for (int x=0; x<1024; ++x)
         addr.push_back(b.alloc(1024));
     for (int x=0; x<1024; ++x)
         b.free(addr[1023-x]);
     addr.clear();
 
     // Now allocate in smaller unequal chunks, then deallocate haphazardly
     // Not all the chunks will succeed allocating, but freeing nullptr is
     // allowed so that is no problem.
     for (int x=0; x<2048; ++x)
         addr.push_back(b.alloc(x+1));
     for (int x=0; x<2048; ++x)
         b.free(addr[((x*23)%2048)^242]);
     addr.clear();
 
     // Go entirely wild: free and alloc interleaved,
     // generate targets and sizes using pseudo-randomness.
     for (int x=0; x<2048; ++x)
         addr.push_back(0);
     uint32_t s = 0x12345678;
     for (int x=0; x<5000; ++x) {
         int idx = s & (addr.size()-1);
         if (s & 0x80000000) {
             b.free(addr[idx]);
             addr[idx] = 0;
         } else if(!addr[idx]) {
             addr[idx] = b.alloc((s >> 16) & 2047);
         }
         bool lsb = s & 1;
         s >>= 1;
         if (lsb)
             s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
     }
     for (void *ptr: addr)
         b.free(ptr);
     addr.clear();
 
     BOOST_CHECK(b.stats().total == synth_size);
     BOOST_CHECK(b.stats().free == synth_size);
 }
 
 /** Mock LockedPageAllocator for testing */
 class TestLockedPageAllocator: public LockedPageAllocator
 {
 public:
     TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
     void* AllocateLocked(size_t len, bool *lockingSuccess)
     {
         *lockingSuccess = false;
         if (count > 0) {
             --count;
 
             if (lockedcount > 0) {
                 --lockedcount;
                 *lockingSuccess = true;
             }
 
             return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory
         }
         return 0;
     }
     void FreeLocked(void* addr, size_t len)
     {
     }
     size_t GetLimit()
     {
         return std::numeric_limits<size_t>::max();
     }
 private:
     int count;
     int lockedcount;
 };
 
 BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
 {
     // Test over three virtual arenas, of which one will succeed being locked
     std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1));
     LockedPool pool(std::move(x));
     BOOST_CHECK(pool.stats().total == 0);
     BOOST_CHECK(pool.stats().locked == 0);
 
+    // Ensure unreasonable requests are refused without allocating anything
+    void *invalid_toosmall = pool.alloc(0);
+    BOOST_CHECK(invalid_toosmall == nullptr);
+    BOOST_CHECK(pool.stats().used == 0);
+    BOOST_CHECK(pool.stats().free == 0);
+    void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1);
+    BOOST_CHECK(invalid_toobig == nullptr);
+    BOOST_CHECK(pool.stats().used == 0);
+    BOOST_CHECK(pool.stats().free == 0);
+
     void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a0);
     BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
     void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a1);
     void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a2);
     void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a3);
     void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a4);
     void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2);
     BOOST_CHECK(a5);
     // We've passed a count of three arenas, so this allocation should fail
     void *a6 = pool.alloc(16);
     BOOST_CHECK(!a6);
 
     pool.free(a0);
     pool.free(a2);
     pool.free(a4);
     pool.free(a1);
     pool.free(a3);
     pool.free(a5);
     BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE);
     BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
     BOOST_CHECK(pool.stats().used == 0);
 }
 
 // These tests use the live LockedPoolManager object; it is also used
 // by other tests, so the conditions are somewhat less controllable and thus the
 // tests are somewhat more error-prone.
 BOOST_AUTO_TEST_CASE(lockedpool_tests_live)
 {
     LockedPoolManager &pool = LockedPoolManager::Instance();
     LockedPool::Stats initial = pool.stats();
 
     void *a0 = pool.alloc(16);
     BOOST_CHECK(a0);
     // Test reading and writing the allocated memory
     *((uint32_t*)a0) = 0x1234;
     BOOST_CHECK(*((uint32_t*)a0) == 0x1234);
 
     pool.free(a0);
     try { // Test exception on double-free
         pool.free(a0);
         BOOST_CHECK(0);
     } catch(std::runtime_error &)
     {
     }
     // If more than one new arena was allocated for the above tests, something is wrong
     BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE));
     // Usage must be back to where it started
     BOOST_CHECK(pool.stats().used == initial.used);
 }
 
 BOOST_AUTO_TEST_SUITE_END()