diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp
--- a/src/bench/lockedpool.cpp
+++ b/src/bench/lockedpool.cpp
@@ -41,4 +41,4 @@
     addr.clear();
 }
 
-BENCHMARK(BenchLockedPool, 530);
+BENCHMARK(BenchLockedPool, 1300);
diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h
--- a/src/support/lockedpool.h
+++ b/src/support/lockedpool.h
@@ -10,6 +10,7 @@
 #include <list>
 #include <map>
 #include <mutex>
+#include <unordered_map>
 
 /**
  * OS-dependent allocation and deallocation of locked/pinned memory pages.
@@ -92,12 +93,20 @@
     bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
 
 private:
-    /**
-     * Map of chunk address to chunk information. This class makes use of the
-     * sorted order to merge previous and next chunks during deallocation.
-     */
-    std::map<char *, size_t> chunks_free;
-    std::map<char *, size_t> chunks_used;
+    typedef std::multimap<size_t, char *> SizeToChunkSortedMap;
+    /** Map to enable O(log(n)) best-fit allocation, as it's sorted by size */
+    SizeToChunkSortedMap size_to_free_chunk;
+
+    typedef std::unordered_map<char *, SizeToChunkSortedMap::const_iterator>
+        ChunkToSizeMap;
+    /** Map from begin of free chunk to its node in size_to_free_chunk */
+    ChunkToSizeMap chunks_free;
+    /** Map from end of free chunk to its node in size_to_free_chunk */
+    ChunkToSizeMap chunks_free_end;
+
+    /** Map from begin of used chunk to its size */
+    std::unordered_map<char *, size_t> chunks_used;
+
     /** Base address of arena */
     char *base;
     /** End address of arena */
diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp
--- a/src/support/lockedpool.cpp
+++ b/src/support/lockedpool.cpp
@@ -47,7 +47,9 @@
     : base(static_cast<char *>(base_in)),
       end(static_cast<char *>(base_in) + size_in), alignment(alignment_in) {
     // Start with one free chunk that covers the entire arena
-    chunks_free.emplace(base, size_in);
+    auto it = size_to_free_chunk.emplace(size_in, base);
+    chunks_free.emplace(base, it);
+    chunks_free_end.emplace(base + size_in, it);
 }
 
 Arena::~Arena() {}
@@ -59,29 +61,36 @@
     // Don't handle zero-sized chunks
     if (size == 0) return nullptr;
 
-    // Pick a large enough free-chunk
-    auto it =
-        std::find_if(chunks_free.begin(), chunks_free.end(),
-                     [=](const std::map<char *, size_t>::value_type &chunk) {
-                         return chunk.second >= size;
-                     });
-    if (it == chunks_free.end()) return nullptr;
+    // Pick a large enough free-chunk. Returns an iterator pointing to the first
+    // element that is not less than key. This allocation strategy is best-fit.
+    // According to "Dynamic Storage Allocation: A Survey and Critical Review",
+    // Wilson et. al. 1995,
+    // http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf,
+    // best-fit and first-fit policies seem to work well in practice.
+    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
+    if (size_ptr_it == size_to_free_chunk.end()) {
+        return nullptr;
+    }
 
     // Create the used-chunk, taking its space from the end of the free-chunk
+    const size_t size_remaining = size_ptr_it->first - size;
     auto alloced =
-        chunks_used.emplace(it->first + it->second - size, size).first;
-    if (!(it->second -= size)) chunks_free.erase(it);
-    return reinterpret_cast<void *>(alloced->first);
-}
-
-/* extend the Iterator if other begins at its end */
-template <class Iterator, class Pair>
-bool extend(Iterator it, const Pair &other) {
-    if (it->first + it->second == other.first) {
-        it->second += other.second;
-        return true;
+        chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
+    chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
+    if (size_ptr_it->first == size) {
+        // whole chunk is used up
+        chunks_free.erase(size_ptr_it->second);
+    } else {
+        // still some memory left in the chunk
+        auto it_remaining =
+            size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
+        chunks_free[size_ptr_it->second] = it_remaining;
+        chunks_free_end.emplace(size_ptr_it->second + size_remaining,
+                                it_remaining);
     }
-    return false;
+    size_to_free_chunk.erase(size_ptr_it);
+
+    return reinterpret_cast<void *>(alloced->first);
 }
 
 void Arena::free(void *ptr) {
@@ -95,25 +104,40 @@
     if (i == chunks_used.end()) {
         throw std::runtime_error("Arena: invalid or double free");
     }
-    auto freed = *i;
+    std::pair<char *, size_t> freed = *i;
     chunks_used.erase(i);
 
-    // Add space to free map, coalescing contiguous chunks
-    auto next = chunks_free.upper_bound(freed.first);
-    auto prev =
-        (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
-    if (prev == chunks_free.end() || !extend(prev, freed))
-        prev = chunks_free.emplace_hint(next, freed);
-    if (next != chunks_free.end() && extend(prev, *next))
+    // coalesce freed with previous chunk
+    auto prev = chunks_free_end.find(freed.first);
+    if (prev != chunks_free_end.end()) {
+        freed.first -= prev->second->first;
+        freed.second += prev->second->first;
+        size_to_free_chunk.erase(prev->second);
+        chunks_free_end.erase(prev);
+    }
+
+    // coalesce freed with chunk after freed
+    auto next = chunks_free.find(freed.first + freed.second);
+    if (next != chunks_free.end()) {
+        freed.second += next->second->first;
+        size_to_free_chunk.erase(next->second);
         chunks_free.erase(next);
+    }
+
+    // Add/set space with coalesced free chunk
+    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
+    chunks_free[freed.first] = it;
+    chunks_free_end[freed.first + freed.second] = it;
 }
 
 Arena::Stats Arena::stats() const {
     Arena::Stats r{0, 0, 0, chunks_used.size(), chunks_free.size()};
-    for (const auto &chunk : chunks_used)
+    for (const auto &chunk : chunks_used) {
         r.used += chunk.second;
-    for (const auto &chunk : chunks_free)
-        r.free += chunk.second;
+    }
+    for (const auto &chunk : chunks_free) {
+        r.free += chunk.second->first;
+    }
     r.total = r.used + r.free;
     return r;
 }