Changeset View
Changeset View
Standalone View
Standalone View
src/test/cuckoocache_tests.cpp
Show All 18 Lines | |||||
 * 2) Some test methods are templated to allow for easier testing
 * against new versions / comparing
 * 3) Results should be treated as a regression test, i.e., did the behavior
 * change significantly from what was expected. This can be OK, depending on
 * the nature of the change, but requires updating the tests to reflect the new
 * expected behavior. For example improving the hit rate may cause some tests
 * using BOOST_CHECK_CLOSE to fail.
 */
FastRandomContext local_rand_ctx(true); | |||||
BOOST_AUTO_TEST_SUITE(cuckoocache_tests); | BOOST_AUTO_TEST_SUITE(cuckoocache_tests); | ||||
/** | /** | ||||
* insecure_GetRandHash fills in a uint256 from local_rand_ctx | |||||
*/ | |||||
void insecure_GetRandHash(uint256 &t) { | |||||
uint32_t *ptr = (uint32_t *)t.begin(); | |||||
for (uint8_t j = 0; j < 8; ++j) { | |||||
*(ptr++) = local_rand_ctx.rand32(); | |||||
} | |||||
} | |||||
/** | |||||
* Test that no values not inserted into the cache are read out of it. | * Test that no values not inserted into the cache are read out of it. | ||||
* | * | ||||
* There are no repeats in the first 200000 insecure_GetRandHash calls | * There are no repeats in the first 200000 insecure_GetRandHash calls | ||||
*/ | */ | ||||
BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes) { | BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes) { | ||||
local_rand_ctx = FastRandomContext(true); | SeedInsecureRand(true); | ||||
CuckooCache::cache<uint256, SignatureCacheHasher> cc{}; | CuckooCache::cache<uint256, SignatureCacheHasher> cc{}; | ||||
size_t megabytes = 4; | size_t megabytes = 4; | ||||
cc.setup_bytes(megabytes << 20); | cc.setup_bytes(megabytes << 20); | ||||
uint256 v; | |||||
for (int x = 0; x < 100000; ++x) { | for (int x = 0; x < 100000; ++x) { | ||||
insecure_GetRandHash(v); | cc.insert(InsecureRand256()); | ||||
cc.insert(v); | |||||
} | } | ||||
for (int x = 0; x < 100000; ++x) { | for (int x = 0; x < 100000; ++x) { | ||||
insecure_GetRandHash(v); | BOOST_CHECK(!cc.contains(InsecureRand256(), false)); | ||||
BOOST_CHECK(!cc.contains(v, false)); | |||||
} | } | ||||
}; | }; | ||||
/**
 * This helper returns the hit rate when megabytes*load worth of entries are
 * inserted into a megabytes sized cache
 */
template <typename Cache> double test_cache(size_t megabytes, double load) { | template <typename Cache> double test_cache(size_t megabytes, double load) { | ||||
local_rand_ctx = FastRandomContext(true); | SeedInsecureRand(true); | ||||
std::vector<uint256> hashes; | std::vector<uint256> hashes; | ||||
Cache set{}; | Cache set{}; | ||||
size_t bytes = megabytes * (1 << 20); | size_t bytes = megabytes * (1 << 20); | ||||
set.setup_bytes(bytes); | set.setup_bytes(bytes); | ||||
uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | ||||
hashes.resize(n_insert); | hashes.resize(n_insert); | ||||
for (uint32_t i = 0; i < n_insert; ++i) { | for (uint32_t i = 0; i < n_insert; ++i) { | ||||
uint32_t *ptr = (uint32_t *)hashes[i].begin(); | uint32_t *ptr = (uint32_t *)hashes[i].begin(); | ||||
for (uint8_t j = 0; j < 8; ++j) { | for (uint8_t j = 0; j < 8; ++j) { | ||||
*(ptr++) = local_rand_ctx.rand32(); | *(ptr++) = InsecureRand32(); | ||||
} | } | ||||
} | } | ||||
/** | /** | ||||
* We make a copy of the hashes because future optimizations of the | * We make a copy of the hashes because future optimizations of the | ||||
* cuckoocache may overwrite the inserted element, so the test is "future | * cuckoocache may overwrite the inserted element, so the test is "future | ||||
* proofed". | * proofed". | ||||
*/ | */ | ||||
std::vector<uint256> hashes_insert_copy = hashes; | std::vector<uint256> hashes_insert_copy = hashes; | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | for (double load = 0.1; load < 2; load *= 2) { | ||||
BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh); | BOOST_CHECK(normalize_hit_rate(hits, load) > HitRateThresh); | ||||
} | } | ||||
} | } | ||||
/** This helper checks that erased elements are preferentially inserted onto and
 * that the hit rate of "fresher" keys is reasonable*/
template <typename Cache> void test_cache_erase(size_t megabytes) { | template <typename Cache> void test_cache_erase(size_t megabytes) { | ||||
double load = 1; | double load = 1; | ||||
local_rand_ctx = FastRandomContext(true); | SeedInsecureRand(true); | ||||
std::vector<uint256> hashes; | std::vector<uint256> hashes; | ||||
Cache set{}; | Cache set{}; | ||||
size_t bytes = megabytes * (1 << 20); | size_t bytes = megabytes * (1 << 20); | ||||
set.setup_bytes(bytes); | set.setup_bytes(bytes); | ||||
uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | ||||
hashes.resize(n_insert); | hashes.resize(n_insert); | ||||
for (uint32_t i = 0; i < n_insert; ++i) { | for (uint32_t i = 0; i < n_insert; ++i) { | ||||
uint32_t *ptr = (uint32_t *)hashes[i].begin(); | uint32_t *ptr = (uint32_t *)hashes[i].begin(); | ||||
for (uint8_t j = 0; j < 8; ++j) { | for (uint8_t j = 0; j < 8; ++j) { | ||||
*(ptr++) = local_rand_ctx.rand32(); | *(ptr++) = InsecureRand32(); | ||||
} | } | ||||
} | } | ||||
/** We make a copy of the hashes because future optimizations of the | /** We make a copy of the hashes because future optimizations of the | ||||
* cuckoocache may overwrite the inserted element, so the test is | * cuckoocache may overwrite the inserted element, so the test is | ||||
* "future proofed". | * "future proofed". | ||||
*/ | */ | ||||
std::vector<uint256> hashes_insert_copy = hashes; | std::vector<uint256> hashes_insert_copy = hashes; | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | |||||
BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok) { | BOOST_AUTO_TEST_CASE(cuckoocache_erase_ok) { | ||||
size_t megabytes = 4; | size_t megabytes = 4; | ||||
test_cache_erase<CuckooCache::cache<uint256, SignatureCacheHasher>>( | test_cache_erase<CuckooCache::cache<uint256, SignatureCacheHasher>>( | ||||
megabytes); | megabytes); | ||||
} | } | ||||
template <typename Cache> void test_cache_erase_parallel(size_t megabytes) { | template <typename Cache> void test_cache_erase_parallel(size_t megabytes) { | ||||
double load = 1; | double load = 1; | ||||
local_rand_ctx = FastRandomContext(true); | SeedInsecureRand(true); | ||||
std::vector<uint256> hashes; | std::vector<uint256> hashes; | ||||
Cache set{}; | Cache set{}; | ||||
size_t bytes = megabytes * (1 << 20); | size_t bytes = megabytes * (1 << 20); | ||||
set.setup_bytes(bytes); | set.setup_bytes(bytes); | ||||
uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | uint32_t n_insert = static_cast<uint32_t>(load * (bytes / sizeof(uint256))); | ||||
hashes.resize(n_insert); | hashes.resize(n_insert); | ||||
for (uint32_t i = 0; i < n_insert; ++i) { | for (uint32_t i = 0; i < n_insert; ++i) { | ||||
uint32_t *ptr = (uint32_t *)hashes[i].begin(); | uint32_t *ptr = (uint32_t *)hashes[i].begin(); | ||||
for (uint8_t j = 0; j < 8; ++j) { | for (uint8_t j = 0; j < 8; ++j) { | ||||
*(ptr++) = local_rand_ctx.rand32(); | *(ptr++) = InsecureRand32(); | ||||
} | } | ||||
} | } | ||||
/** We make a copy of the hashes because future optimizations of the | /** We make a copy of the hashes because future optimizations of the | ||||
* cuckoocache may overwrite the inserted element, so the test is | * cuckoocache may overwrite the inserted element, so the test is | ||||
* "future proofed". | * "future proofed". | ||||
*/ | */ | ||||
std::vector<uint256> hashes_insert_copy = hashes; | std::vector<uint256> hashes_insert_copy = hashes; | ||||
boost::shared_mutex mtx; | boost::shared_mutex mtx; | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | template <typename Cache> void test_cache_generations() { | ||||
// min_hit_rate*max_rate_less_than_tight_hit_rate = 0.999*99%+0.99*1% == | // min_hit_rate*max_rate_less_than_tight_hit_rate = 0.999*99%+0.99*1% == | ||||
// 99.89% | // 99.89% | ||||
// hit rate with low variance. | // hit rate with low variance. | ||||
// We use deterministic values, but this test has also passed on many | // We use deterministic values, but this test has also passed on many | ||||
// iterations with non-deterministic values, so it isn't "overfit" to the | // iterations with non-deterministic values, so it isn't "overfit" to the | ||||
// specific entropy in FastRandomContext(true) and implementation of the | // specific entropy in FastRandomContext(true) and implementation of the | ||||
// cache. | // cache. | ||||
local_rand_ctx = FastRandomContext(true); | SeedInsecureRand(true); | ||||
// block_activity models a chunk of network activity. n_insert elements are | // block_activity models a chunk of network activity. n_insert elements are | ||||
// adde to the cache. The first and last n/4 are stored for removal later | // adde to the cache. The first and last n/4 are stored for removal later | ||||
// and the middle n/2 are not stored. This models a network which uses half | // and the middle n/2 are not stored. This models a network which uses half | ||||
// the signatures of recently (since the last block) added transactions | // the signatures of recently (since the last block) added transactions | ||||
// immediately and never uses the other half. | // immediately and never uses the other half. | ||||
struct block_activity { | struct block_activity { | ||||
std::vector<uint256> reads; | std::vector<uint256> reads; | ||||
block_activity(uint32_t n_insert, Cache &c) : reads() { | block_activity(uint32_t n_insert, Cache &c) : reads() { | ||||
std::vector<uint256> inserts; | std::vector<uint256> inserts; | ||||
inserts.resize(n_insert); | inserts.resize(n_insert); | ||||
reads.reserve(n_insert / 2); | reads.reserve(n_insert / 2); | ||||
for (uint32_t i = 0; i < n_insert; ++i) { | for (uint32_t i = 0; i < n_insert; ++i) { | ||||
uint32_t *ptr = (uint32_t *)inserts[i].begin(); | uint32_t *ptr = (uint32_t *)inserts[i].begin(); | ||||
for (uint8_t j = 0; j < 8; ++j) { | for (uint8_t j = 0; j < 8; ++j) { | ||||
*(ptr++) = local_rand_ctx.rand32(); | *(ptr++) = InsecureRand32(); | ||||
} | } | ||||
} | } | ||||
for (uint32_t i = 0; i < n_insert / 4; ++i) { | for (uint32_t i = 0; i < n_insert / 4; ++i) { | ||||
reads.push_back(inserts[i]); | reads.push_back(inserts[i]); | ||||
} | } | ||||
for (uint32_t i = n_insert - (n_insert / 4); i < n_insert; ++i) { | for (uint32_t i = n_insert - (n_insert / 4); i < n_insert; ++i) { | ||||
reads.push_back(inserts[i]); | reads.push_back(inserts[i]); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 57 Lines • Show Last 20 Lines |