diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -513,6 +513,7 @@
 	noui.cpp
 	policy/fees.cpp
 	policy/settings.cpp
+	pow/aserti32d.cpp
 	pow/daa.cpp
 	pow/eda.cpp
 	pow/pow.cpp
diff --git a/src/Makefile.am b/src/Makefile.am
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -199,6 +199,7 @@
   policy/mempool.h \
   policy/policy.h \
   policy/settings.h \
+  pow/aserti32d.h \
  pow/daa.h \
   pow/eda.h \
   pow/pow.h \
@@ -337,6 +338,7 @@
   noui.cpp \
   policy/fees.cpp \
   policy/settings.cpp \
+  pow/aserti32d.cpp \
   pow/daa.cpp \
   pow/eda.cpp \
   pow/pow.cpp \
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -123,6 +123,7 @@
   avalanche/test/peermanager_tests.cpp \
   avalanche/test/processor_tests.cpp \
   avalanche/test/proof_tests.cpp \
+  pow/test/aserti32d_tests.cpp \
   pow/test/daa_tests.cpp \
   pow/test/eda_tests.cpp \
   test/scriptnum10.h \
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -97,6 +97,9 @@
         consensus.fPowAllowMinDifficultyBlocks = false;
         consensus.fPowNoRetargeting = false;
 
+        // two days
+        consensus.nDAAHalfLife = 2 * 24 * 60 * 60;
+
         // nPowTargetTimespan / nPowTargetSpacing
         consensus.nMinerConfirmationWindow = 2016;
         consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = {
@@ -295,6 +298,9 @@
         consensus.fPowAllowMinDifficultyBlocks = true;
         consensus.fPowNoRetargeting = false;
 
+        // two days
+        consensus.nDAAHalfLife = 2 * 24 * 60 * 60;
+
         // nPowTargetTimespan / nPowTargetSpacing
         consensus.nMinerConfirmationWindow = 2016;
         consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = {
@@ -448,6 +454,9 @@
         consensus.fPowAllowMinDifficultyBlocks = true;
         consensus.fPowNoRetargeting = true;
 
+        // two days
+        consensus.nDAAHalfLife = 2 * 24 * 60 * 60;
+
         // Faster than normal for regtest (144 instead of 2016)
         consensus.nMinerConfirmationWindow = 144;
         consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = {
diff --git a/src/consensus/params.h b/src/consensus/params.h
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -97,6 +97,7 @@
     uint256 powLimit;
     bool fPowAllowMinDifficultyBlocks;
     bool fPowNoRetargeting;
+    int64_t nDAAHalfLife;
     int64_t nPowTargetSpacing;
    int64_t nPowTargetTimespan;
     int64_t DifficultyAdjustmentInterval() const {
diff --git a/src/pow/aserti32d.h b/src/pow/aserti32d.h
new file mode 100644
--- /dev/null
+++ b/src/pow/aserti32d.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Bitcoin developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_POW_ASERTI32D_H
+#define BITCOIN_POW_ASERTI32D_H
+
+#include <cstdint>
+
+class arith_uint256;
+class CBlockHeader;
+class CBlockIndex;
+
+namespace Consensus {
+struct Params;
+}
+
+arith_uint256 CalculateASERT(const arith_uint256 &refTarget,
+                             const int64_t nPowTargetSpacing,
+                             const int64_t nTimeDiff,
+                             const int64_t nHeightDiff,
+                             const arith_uint256 &powLimit,
+                             const int64_t nHalfLife) noexcept;
+
+uint32_t GetNextASERTWorkRequired(const CBlockIndex *pindexPrev,
+                                  const CBlockHeader *pblock,
+                                  const Consensus::Params &params) noexcept;
+
+uint32_t
+GetNextASERTWorkRequired(const CBlockIndex *pindexPrev,
+                         const CBlockHeader *pblock,
+                         const Consensus::Params &params,
+                         const CBlockIndex *pindexAnchorBlock) noexcept;
+
+/**
+ * ASERT caches a special block index for efficiency. If block indices are
+ * freed then this needs to be called to ensure no dangling pointer when a
+ * new block tree is created.
+ * (this is temporary and will be removed after the ASERT constants are
+ * fixed)
+ */
+void ResetASERTAnchorBlockCache() noexcept;
+
+#endif // BITCOIN_POW_ASERTI32D_H
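The nDAAHalfLife value added above (two days) sets the exponential rate of the new DAA: for every half-life the chain falls behind the ideal 600-second schedule, the target doubles (difficulty halves), and symmetrically when it runs ahead. The following is a minimal standalone sketch of the idealized real-valued rule that aserti32d approximates in fixed point; the function name and printed cases are illustrative only, not part of the patch:

// Idealized (floating-point) ASERT factor, for intuition only:
//   nextTarget = refTarget * 2^((nTimeDiff - spacing * (nHeightDiff + 1)) / halfLife)
#include <cmath>
#include <cstdint>
#include <cstdio>

static double IdealAsertFactor(int64_t nTimeDiff, int64_t nHeightDiff,
                               int64_t nPowTargetSpacing, int64_t nHalfLife) {
    return std::pow(2.0,
                    double(nTimeDiff - nPowTargetSpacing * (nHeightDiff + 1)) /
                        double(nHalfLife));
}

int main() {
    const int64_t spacing = 600;
    const int64_t halfLife = 2 * 24 * 60 * 60; // nDAAHalfLife above
    // 288 blocks exactly on schedule: factor 1.0, target unchanged.
    std::printf("%.4f\n",
                IdealAsertFactor(288 * spacing, 287, spacing, halfLife));
    // The same blocks arriving a full half-life (two days) late: factor 2.0,
    // i.e. the target doubles and the difficulty halves.
    std::printf("%.4f\n", IdealAsertFactor(288 * spacing + halfLife, 287,
                                           spacing, halfLife));
}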
diff --git a/src/pow/aserti32d.cpp b/src/pow/aserti32d.cpp
new file mode 100644
--- /dev/null
+++ b/src/pow/aserti32d.cpp
@@ -0,0 +1,246 @@
+// Copyright (c) 2020 The Bitcoin developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <pow/aserti32d.h>
+
+#include <chain.h>
+#include <consensus/activation.h>
+#include <consensus/params.h>
+#include <validation.h> // ::ChainActive()
+
+#include <atomic>
+
+static std::atomic<const CBlockIndex *> cachedAnchor{nullptr};
+
+void ResetASERTAnchorBlockCache() noexcept {
+    cachedAnchor = nullptr;
+}
+
+/**
+ * Returns a pointer to the anchor block used for ASERT.
+ * As anchor we use the first block for which IsAxionEnabled() returns true.
+ * This block happens to be the last block which was mined under the old DAA
+ * rules.
+ *
+ * This function is meant to be removed some time after the upgrade, once
+ * the anchor block is deeply buried, and behind a hard-coded checkpoint.
+ *
+ * Preconditions: - pindex must not be nullptr
+ *                - pindex must satisfy: IsAxionEnabled(params, pindex) == true
+ * Postcondition: Returns a pointer to the first (lowest) block for which
+ *                IsAxionEnabled is true, and for which IsAxionEnabled(pprev)
+ *                is false (or for which pprev is nullptr). The return value
+ *                may be pindex itself.
+ */
+static const CBlockIndex *
+GetASERTAnchorBlock(const CBlockIndex *const pindex,
+                    const Consensus::Params &params) {
+    assert(pindex);
+
+    // - We check if we have a cached result, and if we do and it really is
+    //   an ancestor of pindex, then we return it.
+    //
+    // - If we do not, or if the cached result is not an ancestor of pindex,
+    //   then we proceed with the more expensive walk back to find the ASERT
+    //   anchor block.
+    //
+    // CBlockIndex::GetAncestor() is reasonably efficient; it uses
+    // CBlockIndex::pskip. Note that if pindex == cachedAnchor, GetAncestor()
+    // here will return cachedAnchor, which is what we want.
+    const CBlockIndex *lastCached = cachedAnchor.load();
+    if (lastCached && pindex->GetAncestor(lastCached->nHeight) == lastCached) {
+        return lastCached;
+    }
+
+    // Slow path: walk back until we find the first ancestor for which
+    // IsAxionEnabled() == true.
+    const CBlockIndex *anchor = pindex;
+
+    while (anchor->pprev) {
+        // First, skip backwards, testing IsAxionEnabled. The code below
+        // leverages CBlockIndex::pskip to walk back efficiently.
+        if (anchor->pskip && IsAxionEnabled(params, anchor->pskip)) {
+            // skip backward
+            anchor = anchor->pskip;
+            // continue skipping
+            continue;
+        }
+        // We cannot skip here; walk back by 1.
+        if (!IsAxionEnabled(params, anchor->pprev)) {
+            // Found it -- the highest block where Axion is not enabled is
+            // anchor->pprev, and anchor points to the first block for which
+            // IsAxionEnabled() == true.
+            break;
+        }
+        anchor = anchor->pprev;
+    }
+
+    // Overwrite the cache with the anchor we found. More likely than not,
+    // the next time we are asked to validate a header it will be part of
+    // the same or a similar chain, not some other unrelated chain with a
+    // totally different anchor.
+    cachedAnchor = anchor;
+
+    return anchor;
+}
+
+uint32_t GetNextASERTWorkRequired(const CBlockIndex *pindexPrev,
+                                  const CBlockHeader *pblock,
+                                  const Consensus::Params &params) noexcept {
+    return GetNextASERTWorkRequired(pindexPrev, pblock, params,
+                                    GetASERTAnchorBlock(pindexPrev, params));
+}
+
+/**
+ * Compute the next required proof of work using an absolutely scheduled
+ * exponentially weighted target (ASERT).
+ *
+ * With ASERT, we define an ideal schedule for block issuance (e.g. 1 block
+ * every 600 seconds), and we calculate the difficulty based on how far the
+ * most recent block's timestamp is ahead of or behind that schedule. We set
+ * our targets (difficulty) exponentially. For every [nHalfLife] seconds
+ * ahead of or behind schedule we get, we double or halve the difficulty.
+ */
+uint32_t
+GetNextASERTWorkRequired(const CBlockIndex *pindexPrev,
+                         const CBlockHeader *pblock,
+                         const Consensus::Params &params,
+                         const CBlockIndex *pindexAnchorBlock) noexcept {
+    // This cannot handle the genesis block, and early blocks in general.
+    assert(pindexPrev != nullptr);
+
+    // The anchor block is the block on which all ASERT scheduling
+    // calculations are based. It too must exist, and it must have a valid
+    // parent.
+    assert(pindexAnchorBlock != nullptr);
+
+    // We make no further assumptions other than that the height of the prev
+    // block must be >= that of the anchor block.
+    assert(pindexPrev->nHeight >= pindexAnchorBlock->nHeight);
+
+    const arith_uint256 powLimit = UintToArith256(params.powLimit);
+
+    // Special difficulty rule for testnet:
+    // If the new block's timestamp is more than 2 * 10 minutes after the
+    // previous block's, then allow mining of a min-difficulty block.
+    if (params.fPowAllowMinDifficultyBlocks &&
+        (pblock->GetBlockTime() >
+         pindexPrev->GetBlockTime() + 2 * params.nPowTargetSpacing)) {
+        return UintToArith256(params.powLimit).GetCompact();
+    }
+
+    // For the nTimeDiff calculation, the timestamp of the parent of the
+    // anchor block is used, as per the absolute formulation of ASERT. This
+    // is somewhat counterintuitive since it is referred to as the anchor
+    // timestamp, but as per the formula the timestamp of block M-1 must be
+    // used if the anchor is M.
+    assert(pindexPrev->pprev != nullptr);
+    // Note: the time difference is to the parent of the anchor block (or to
+    // the anchor block itself iff the anchor is genesis), according to the
+    // absolute formulation of ASERT.
+    const auto anchorTime = pindexAnchorBlock->pprev
+                                ? pindexAnchorBlock->pprev->GetBlockTime()
+                                : pindexAnchorBlock->GetBlockTime();
+    const int64_t nTimeDiff = pindexPrev->GetBlockTime() - anchorTime;
+    // The height difference is from the current block to the anchor block.
+    const int64_t nHeightDiff =
+        pindexPrev->nHeight - pindexAnchorBlock->nHeight;
+    const arith_uint256 refBlockTarget =
+        arith_uint256().SetCompact(pindexAnchorBlock->nBits);
+    // Do the actual target adaptation calculation in the separate
+    // CalculateASERT() function.
+    arith_uint256 nextTarget =
+        CalculateASERT(refBlockTarget, params.nPowTargetSpacing, nTimeDiff,
+                       nHeightDiff, powLimit, params.nDAAHalfLife);
+
+    // CalculateASERT() already clamps to powLimit.
+    return nextTarget.GetCompact();
+}
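+
+// Worked example of the conventions above (illustrative numbers, not an
+// additional code path): if the anchor is block M and the tip is block N,
+// then nHeightDiff = N - M while nTimeDiff = time(N) - time(M-1), i.e. it
+// spans nHeightDiff + 1 solvetimes, including the anchor's own. On an
+// ideally spaced chain, nTimeDiff == 600 * (nHeightDiff + 1), the exponent
+// below is zero, and the anchor target is returned unchanged.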
+
+// ASERT calculation function.
+// Clamps to powLimit.
+arith_uint256 CalculateASERT(const arith_uint256 &refTarget,
+                             const int64_t nPowTargetSpacing,
+                             const int64_t nTimeDiff,
+                             const int64_t nHeightDiff,
+                             const arith_uint256 &powLimit,
+                             const int64_t nHalfLife) noexcept {
+    // The input target must never be zero nor exceed powLimit.
+    assert(refTarget > 0 && refTarget <= powLimit);
+
+    // We need some leading zero bits in powLimit in order to have room to
+    // handle overflows easily. 32 leading zero bits is more than enough.
+    assert((powLimit >> 224) == 0);
+
+    // The height diff must NOT be negative.
+    assert(nHeightDiff >= 0);
+
+    // It will be helpful when reading what follows to remember that
+    // nextTarget is adapted from the anchor block's target value.
+
+    // Ultimately, we want to approximate the following ASERT formula, using
+    // only integer (fixed-point) math:
+    //   new_target = old_target *
+    //       2^((blocks_time - IDEAL_BLOCK_TIME * (height_diff + 1)) /
+    //          nHalfLife)
+
+    // First, we'll calculate the exponent:
+    assert(llabs(nTimeDiff - nPowTargetSpacing * nHeightDiff) <
+           (1ll << (63 - 16)));
+    const int64_t exponent =
+        ((nTimeDiff - nPowTargetSpacing * (nHeightDiff + 1)) * 65536) /
+        nHalfLife;
+
+    // Next, we use the 2^x = 2 * 2^(x-1) identity to shift our exponent
+    // into the [0, 1) interval. The truncated exponent tells us how many
+    // shifts we need to do.
+    // Note1: This needs to be a right shift. A right shift rounds downward
+    //        (floored division), whereas integer division in C++ rounds
+    //        towards zero (truncated division).
+    // Note2: This algorithm uses arithmetic shifts of negative numbers.
+    //        This is unspecified but very common behavior for C++ compilers
+    //        before C++20, and standard with C++20. We must check this
+    //        behavior, e.g. using a static_assert.
+    static_assert(int64_t(-1) >> 1 == int64_t(-1),
+                  "ASERT algorithm needs arithmetic shift support");
+
+    // Now we compute an approximated target * 2^(exponent/65536.0).
+
+    // First decompose the exponent into 'integer' and 'fractional' parts:
+    int64_t shifts = exponent >> 16;
+    const auto frac = uint16_t(exponent);
+    assert(exponent == (shifts * 65536) + frac);
+
+    // Multiply the target by 65536 * 2^(fractional part).
+    // 2^x ~= (1 + 0.695502049*x + 0.2262698*x**2 + 0.0782318*x**3)
+    // for 0 <= x < 1. The error versus the actual 2^x is less than 0.013%.
+    const uint32_t factor =
+        65536 + ((+195766423245049ull * frac + 971821376ull * frac * frac +
+                  5127ull * frac * frac * frac + (1ull << 47)) >>
+                 48);
+    // This is always < 2^241 since refTarget < 2^224.
+    arith_uint256 nextTarget = refTarget * factor;
+
+    // Multiply by 2^(integer part) / 65536.
+    shifts -= 16;
+    if (shifts <= 0) {
+        nextTarget >>= -shifts;
+    } else {
+        // Detect overflow that would discard high bits.
+        const auto nextTargetShifted = nextTarget << shifts;
+        if ((nextTargetShifted >> shifts) != nextTarget) {
+            // If we had wider integers, the final value of nextTarget would
+            // be >= 2^256, so it would have just ended up as powLimit
+            // anyway.
+            nextTarget = powLimit;
+        } else {
+            // Shifting produced no overflow; we can assign the value.
+            nextTarget = nextTargetShifted;
+        }
+    }
+
+    if (nextTarget == 0) {
+        // 0 is not a valid target, but 1 is.
+        nextTarget = arith_uint256(1);
+    } else if (nextTarget > powLimit) {
+        nextTarget = powLimit;
+    }
+
+    // We return from only one place, for copy elision.
+    return nextTarget;
+}
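The 0.013% bound claimed for the cubic approximation of 2^x can be spot-checked with a short standalone harness. This sketch, written for this note and not part of the patch, replays the factor computation above for every 16-bit fractional value and compares it with the exact result:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    double maxRelErr = 0.0;
    for (uint64_t frac = 0; frac < 65536; frac++) {
        // The same fixed-point cubic as in CalculateASERT() above.
        const uint32_t factor =
            65536 + ((195766423245049ull * frac + 971821376ull * frac * frac +
                      5127ull * frac * frac * frac + (1ull << 47)) >>
                     48);
        // Exact value of 65536 * 2^(frac/65536).
        const double exact = 65536.0 * std::pow(2.0, frac / 65536.0);
        maxRelErr = std::max(maxRelErr, std::fabs(factor - exact) / exact);
    }
    // Expected to print a bound comfortably below 0.013%.
    std::printf("max relative error: %.5f%%\n", maxRelErr * 100.0);
}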
diff --git a/src/pow/pow.cpp b/src/pow/pow.cpp
--- a/src/pow/pow.cpp
+++ b/src/pow/pow.cpp
@@ -11,6 +11,7 @@
 #include <chainparams.h>
 #include <consensus/activation.h>
 #include <consensus/params.h>
+#include <pow/aserti32d.h>
 #include <pow/daa.h>
 #include <pow/eda.h>
 #include <primitives/blockheader.h>
@@ -29,6 +30,10 @@
         return pindexPrev->nBits;
     }
 
+    if (IsAxionEnabled(params, pindexPrev)) {
+        return GetNextASERTWorkRequired(pindexPrev, pblock, params);
+    }
+
     if (IsDAAEnabled(params, pindexPrev)) {
         return GetNextDAAWorkRequired(pindexPrev, pblock, params);
     }
diff --git a/src/pow/test/CMakeLists.txt b/src/pow/test/CMakeLists.txt
--- a/src/pow/test/CMakeLists.txt
+++ b/src/pow/test/CMakeLists.txt
@@ -12,6 +12,7 @@
 	fixture.cpp
 
 	TESTS
+		aserti32d_tests.cpp
 		daa_tests.cpp
 		eda_tests.cpp
 )
diff --git a/src/pow/test/aserti32d_tests.cpp b/src/pow/test/aserti32d_tests.cpp
new file mode 100644
--- /dev/null
+++ b/src/pow/test/aserti32d_tests.cpp
@@ -0,0 +1,714 @@
+// Copyright (c) 2020 The Bitcoin Core developers
+// Distributed under the MIT/X11 software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <pow/aserti32d.h>
+
+#include <chain.h>
+#include <chainparams.h>
+#include <config.h>
+#include <consensus/activation.h>
+#include <pow/pow.h>
+
+#include <test/util/setup_common.h>
+
+#include <boost/test/unit_test.hpp>
+
+#include <cmath>
+
+BOOST_FIXTURE_TEST_SUITE(aserti32d_tests, BasicTestingSetup)
+
+static CBlockIndex GetBlockIndex(CBlockIndex *pindexPrev,
+                                 int64_t nTimeInterval, uint32_t nBits) {
+    CBlockIndex block;
+    block.pprev = pindexPrev;
+    block.nHeight = pindexPrev->nHeight + 1;
+    block.nTime = pindexPrev->nTime + nTimeInterval;
+    block.nBits = nBits;
+
+    block.BuildSkip();
+    block.nChainWork = pindexPrev->nChainWork + GetBlockProof(block);
+    return block;
+}
+
+static double TargetFromBits(const uint32_t nBits) {
+    return (nBits & 0xff'ff'ff) * pow(256, (nBits >> 24) - 3);
+}
+
+static double
+GetASERTApproximationError(const CBlockIndex *pindexPrev,
+                           const uint32_t finalBits,
+                           const CBlockIndex *pindexAnchorBlock) {
+    const int64_t nHeightDiff =
+        pindexPrev->nHeight - pindexAnchorBlock->nHeight;
+    const int64_t nTimeDiff =
+        pindexPrev->GetBlockTime() - pindexAnchorBlock->pprev->GetBlockTime();
+    const uint32_t initialBits = pindexAnchorBlock->nBits;
+
+    BOOST_CHECK(nHeightDiff >= 0);
+    double dInitialPow = TargetFromBits(initialBits);
+    double dFinalPow = TargetFromBits(finalBits);
+
+    double dExponent =
+        double(nTimeDiff - (nHeightDiff + 1) * 600) / double(2 * 24 * 3600);
+    double dTarget = dInitialPow * pow(2, dExponent);
+
+    return (dFinalPow - dTarget) / dTarget;
+}
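+
+// A reminder of the compact-bits encoding that TargetFromBits() above
+// decodes: nBits packs a target as mantissa * 256^(exponent - 3), with the
+// mantissa in the low 3 bytes and the exponent in the high byte. For
+// example, mainnet's powLimit encodes as nBits = 0x1d00ffff, i.e.
+// 0x00ffff * 256^(0x1d - 3) = 0xffff << 208.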
+
+BOOST_AUTO_TEST_CASE(asert_difficulty_test) {
+    DummyConfig config(CBaseChainParams::MAIN);
+
+    std::vector<CBlockIndex> blocks(3000 + 2 * 24 * 3600);
+
+    const Consensus::Params &params = config.GetChainParams().GetConsensus();
+    const arith_uint256 powLimit = UintToArith256(params.powLimit);
+    arith_uint256 currentPow = powLimit >> 3;
+    uint32_t initialBits = currentPow.GetCompact();
+    double dMaxErr = 0.0001166792656486;
+
+    // Genesis block, and parent of the ASERT anchor block in this test case.
+    blocks[0] = CBlockIndex();
+    blocks[0].nHeight = 0;
+    blocks[0].nTime = 1269211443;
+    // The pre-anchor block's nBits should never be used, so we set it to a
+    // nonsense value in order to trigger an error if it is ever accessed.
+    blocks[0].nBits = 0x0dedbeef;
+
+    blocks[0].nChainWork = GetBlockProof(blocks[0]);
+
+    // Block counter.
+    size_t i = 1;
+
+    // The ASERT anchor block. We give this one a solvetime of 150 seconds
+    // to ensure that the solvetime between the pre-anchor and the anchor
+    // blocks is actually used.
+    blocks[1] = GetBlockIndex(&blocks[0], 150, initialBits);
+    // The nBits for the next block should not be equal to the anchor
+    // block's nBits.
+    CBlockHeader blkHeaderDummy;
+    uint32_t nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy,
+                                              params, &blocks[1]);
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    BOOST_CHECK(nBits != initialBits);
+
+    // If we add another block at 1050 seconds, we should return to the
+    // anchor block's nBits.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 1050, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    BOOST_CHECK(nBits == initialBits);
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+
+    currentPow = arith_uint256().SetCompact(nBits);
+    // Before we do anything else, check that timestamps *before* the anchor
+    // block work fine. Jumping 2 days into the past will give a timestamp
+    // before the anchor, and should halve the target.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 - 172800, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    currentPow = arith_uint256().SetCompact(nBits);
+    // Because nBits truncates the target, we don't end up with exactly 1/2
+    // the target.
+    BOOST_CHECK(currentPow <= arith_uint256().SetCompact(initialBits) / 2);
+    BOOST_CHECK(currentPow >= arith_uint256().SetCompact(initialBits - 1) / 2);
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+
+    // Jumping forward 2 days should return the target to the initial value.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 + 172800, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    currentPow = arith_uint256().SetCompact(nBits);
+    BOOST_CHECK(nBits == initialBits);
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+
+    // Pile up some blocks every 10 mins to establish some history.
+    for (; i < 150; i++) {
+        blocks[i] = GetBlockIndex(&blocks[i - 1], 600, nBits);
+        BOOST_CHECK_EQUAL(blocks[i].nBits, nBits);
+    }
+
+    nBits = GetNextASERTWorkRequired(&blocks[i - 1], &blkHeaderDummy, params,
+                                     &blocks[1]);
+
+    BOOST_CHECK_EQUAL(nBits, initialBits);
+
+    // Difficulty stays the same as long as we produce a block every 10 mins.
+    for (size_t j = 0; j < 10; i++, j++) {
+        blocks[i] = GetBlockIndex(&blocks[i - 1], 600, nBits);
+        BOOST_CHECK_EQUAL(GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy,
+                                                   params, &blocks[1]),
+                          nBits);
+    }
+
+    // If we add two blocks whose solvetimes together add up to 1200 s, then
+    // the next block's target should be the same as the one before these
+    // blocks (at this point, equal to initialBits).
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 300, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 900, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    BOOST_CHECK_EQUAL(nBits, initialBits);
+    BOOST_CHECK(nBits != blocks[i - 1].nBits);
+
+    // The same in reverse -- this time the slower block comes first,
+    // followed by the faster block.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 900, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 300, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    BOOST_CHECK_EQUAL(nBits, initialBits);
+    BOOST_CHECK(nBits != blocks[i - 1].nBits);
+
+    // Jumping forward 2 days should double the target (halve the
+    // difficulty).
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 + 2 * 24 * 3600, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    currentPow = arith_uint256().SetCompact(nBits) / 2;
+    BOOST_CHECK_EQUAL(currentPow.GetCompact(), initialBits);
+
+    // Jumping backward 2 days should bring the target back to where we
+    // started.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 - 2 * 24 * 3600, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    BOOST_CHECK_EQUAL(nBits, initialBits);
+
+    // Jumping backward 2 days should halve the target (double the
+    // difficulty).
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 - 2 * 24 * 3600, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    currentPow = arith_uint256().SetCompact(nBits);
+    // Because nBits truncates the target, we don't end up with exactly 1/2
+    // the target.
+    BOOST_CHECK(currentPow <= arith_uint256().SetCompact(initialBits) / 2);
+    BOOST_CHECK(currentPow >= arith_uint256().SetCompact(initialBits - 1) / 2);
+
+    // And forward again.
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 + 2 * 24 * 3600, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    BOOST_CHECK_EQUAL(nBits, initialBits);
+    blocks[i] = GetBlockIndex(&blocks[i - 1], 600 + 2 * 24 * 3600, nBits);
+    nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params,
+                                     &blocks[1]);
+    // absolute
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[1])) < dMaxErr);
+    // relative
+    BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits,
+                                                &blocks[i - 2])) < dMaxErr);
+    currentPow = arith_uint256().SetCompact(nBits) / 2;
+    BOOST_CHECK_EQUAL(currentPow.GetCompact(), initialBits);
+
+    // Iterate over the entire -2*24*3600..+2*24*3600 range to check that
+    // our integer approximation:
+    //   1. Should be monotonic.
+    //   2. Should change the target at least once every 8 seconds
+    //      (worst-case: 15-bit precision on nBits).
+    //   3. Should never change the target by more than XXXX per 1-second
+    //      step.
+    //   4. Never exceeds dMaxErr in absolute error vs a double-precision
+    //      float calculation.
+    //   5. Has almost exactly the dMax and dMin errors we expect for the
+    //      formula.
+    double dMin = 0;
+    double dMax = 0;
+    double dErr;
+    double dRelMin = 0;
+    double dRelMax = 0;
+    double dRelErr;
+    double dMaxStep = 0;
+    uint32_t nBitsRingBuffer[8];
+    double dStep = 0;
+    blocks[i] = GetBlockIndex(&blocks[i - 1], -2 * 24 * 3600 - 30, nBits);
+    for (size_t j = 0; j < 4 * 24 * 3600 + 660; j++) {
+        blocks[i].nTime++;
+        nBits = GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy, params,
+                                         &blocks[1]);
+
+        if (j > 8) {
+            // 1: Monotonic.
+            BOOST_CHECK(
+                arith_uint256().SetCompact(nBits) >=
+                arith_uint256().SetCompact(nBitsRingBuffer[(j - 1) % 8]));
+            // 2: Changes at least once every 8 seconds (worst case: nBits =
+            //    1d008000 to 1d008001).
+            BOOST_CHECK(arith_uint256().SetCompact(nBits) >
+                        arith_uint256().SetCompact(nBitsRingBuffer[j % 8]));
+            // 3: Check the 1-second step size.
+            dStep = (TargetFromBits(nBits) -
+                     TargetFromBits(nBitsRingBuffer[(j - 1) % 8])) /
+                    TargetFromBits(nBits);
+            dMaxStep = std::max(dMaxStep, dStep);
+            // from nBits = 1d008000 to 1d008001
+            BOOST_CHECK(dStep < 0.0000314812106363);
+        }
+        nBitsRingBuffer[j % 8] = nBits;
+
+        // 4 and 5: Check the error vs the double-precision float
+        // calculation.
+        dErr = GetASERTApproximationError(&blocks[i], nBits, &blocks[1]);
+        dRelErr = GetASERTApproximationError(&blocks[i], nBits,
+                                             &blocks[i - 1]);
+        dMin = std::min(dMin, dErr);
+        dMax = std::max(dMax, dErr);
+        dRelMin = std::min(dRelMin, dRelErr);
+        dRelMax = std::max(dRelMax, dRelErr);
+        BOOST_CHECK_MESSAGE(
+            fabs(dErr) < dMaxErr,
+            strprintf(
+                "solveTime: %d\tStep size: %.8f%%\tdErr: %.8f%%\tnBits: %0x\n",
+                int64_t(blocks[i].nTime) - blocks[i - 1].nTime, dStep * 100,
+                dErr * 100, nBits));
+        BOOST_CHECK_MESSAGE(
+            fabs(dRelErr) < dMaxErr,
+            strprintf("solveTime: %d\tStep size: %.8f%%\tdRelErr: "
+                      "%.8f%%\tnBits: %0x\n",
+                      int64_t(blocks[i].nTime) - blocks[i - 1].nTime,
+                      dStep * 100, dRelErr * 100, nBits));
+    }
+    auto failMsg = strprintf(
+        "Min error: %16.14f%%\tMax error: %16.14f%%\tMax step: %16.14f%%\n",
+        dMin * 100, dMax * 100, dMaxStep * 100);
+    BOOST_CHECK_MESSAGE(
+        dMin < -0.0001013168981059 && dMin > -0.0001013168981060 &&
+            dMax > 0.0001166792656485 && dMax < 0.0001166792656486,
+        failMsg);
+    failMsg = strprintf("Min relError: %16.14f%%\tMax relError: %16.14f%%\n",
+                        dRelMin * 100, dRelMax * 100);
+    BOOST_CHECK_MESSAGE(
+        dRelMin < -0.0001013168981059 && dRelMin > -0.0001013168981060 &&
+            dRelMax > 0.0001166792656485 && dRelMax < 0.0001166792656486,
+        failMsg);
+
+    // Difficulty increases as long as we produce fast blocks.
+    for (size_t j = 0; j < 100; i++, j++) {
+        uint32_t nextBits;
+        arith_uint256 currentTarget;
+        currentTarget.SetCompact(nBits);
+
+        blocks[i] = GetBlockIndex(&blocks[i - 1], 500, nBits);
+        nextBits = GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy,
+                                            params, &blocks[1]);
+        arith_uint256 nextTarget;
+        nextTarget.SetCompact(nextBits);
+
+        // Make sure that the target is decreased.
+        BOOST_CHECK(nextTarget <= currentTarget);
+
+        nBits = nextBits;
+    }
+}
+
+static std::string StrPrintCalcArgs(const arith_uint256 refTarget,
+                                    const int64_t targetSpacing,
+                                    const int64_t timeDiff,
+                                    const int64_t heightDiff,
+                                    const arith_uint256 expectedTarget,
+                                    const uint32_t expectednBits) {
+    return strprintf("\n"
+                     "ref=        %s\n"
+                     "spacing=    %d\n"
+                     "timeDiff=   %d\n"
+                     "heightDiff= %d\n"
+                     "expTarget=  %s\n"
+                     "exp nBits=  0x%08x\n",
+                     refTarget.ToString(), targetSpacing, timeDiff,
+                     heightDiff, expectedTarget.ToString(), expectednBits);
+}
+
+// Tests of the CalculateASERT function.
+BOOST_AUTO_TEST_CASE(calculate_asert_test) {
+    DummyConfig config(CBaseChainParams::MAIN);
+    const Consensus::Params &params = config.GetChainParams().GetConsensus();
+    const int64_t nHalfLife = params.nDAAHalfLife;
+
+    const arith_uint256 powLimit = UintToArith256(params.powLimit);
+    arith_uint256 initialTarget = powLimit >> 4;
+    int64_t height = 0;
+
+    // The CalculateASERT function uses the absolute ASERT formulation and
+    // adds +1 to the height difference that it receives.
+    // The time difference passed to it must factor in the difference to the
+    // *parent* of the reference block.
+    // We assume the parent is ideally spaced in time before the reference
+    // block.
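+    // Concretely: with one post-anchor block (a height diff of 1) the
+    // formula spans two solvetimes -- the reference block's own
+    // (parent_time_diff) plus the new block's -- so an on-schedule call
+    // below passes a time diff of parent_time_diff + 600 and expects the
+    // target to be unchanged.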
+    static const int64_t parent_time_diff = 600;
+
+    // Steady.
+    arith_uint256 nextTarget = CalculateASERT(
+        initialTarget, params.nPowTargetSpacing,
+        parent_time_diff + 600 /* nTimeDiff */, ++height, powLimit,
+        nHalfLife);
+    BOOST_CHECK(nextTarget == initialTarget);
+
+    // A block that arrives in half the expected time.
+    nextTarget = CalculateASERT(initialTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 600 + 300, ++height,
+                                powLimit, nHalfLife);
+    BOOST_CHECK(nextTarget < initialTarget);
+
+    // A block that makes up for the shortfall of the previous one restores
+    // the target to its initial value.
+    arith_uint256 prevTarget = nextTarget;
+    nextTarget = CalculateASERT(initialTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 600 + 300 + 900, ++height,
+                                powLimit, nHalfLife);
+    BOOST_CHECK(nextTarget > prevTarget);
+    BOOST_CHECK(nextTarget == initialTarget);
+
+    // Two days ahead of schedule should double the target (halve the
+    // difficulty).
+    prevTarget = nextTarget;
+    nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 288 * 1200, 288, powLimit,
+                                nHalfLife);
+    BOOST_CHECK(nextTarget == prevTarget * 2);
+
+    // Two days behind schedule should halve the target (double the
+    // difficulty).
+    prevTarget = nextTarget;
+    nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 288 * 0, 288, powLimit,
+                                nHalfLife);
+    BOOST_CHECK(nextTarget == prevTarget / 2);
+    BOOST_CHECK(nextTarget == initialTarget);
+
+    // Ramp up from initialTarget to powLimit -- this should only take 4
+    // doublings...
+    uint32_t powLimit_nBits = powLimit.GetCompact();
+    uint32_t next_nBits;
+    for (size_t k = 0; k < 3; k++) {
+        prevTarget = nextTarget;
+        nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing,
+                                    parent_time_diff + 288 * 1200, 288,
+                                    powLimit, nHalfLife);
+        BOOST_CHECK(nextTarget == prevTarget * 2);
+        BOOST_CHECK(nextTarget < powLimit);
+        next_nBits = nextTarget.GetCompact();
+        BOOST_CHECK(next_nBits != powLimit_nBits);
+    }
+
+    prevTarget = nextTarget;
+    nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 288 * 1200, 288, powLimit,
+                                nHalfLife);
+    next_nBits = nextTarget.GetCompact();
+    BOOST_CHECK(nextTarget == prevTarget * 2);
+    BOOST_CHECK(next_nBits == powLimit_nBits);
+
+    // Fast periods now cannot increase the target beyond the POW limit,
+    // even if we try to overflow nextTarget. prevTarget is a uint256, so
+    // 256*2 = 512 days would overflow nextTarget unless CalculateASERT
+    // correctly detects this error.
+    nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing,
+                                parent_time_diff + 512 * 144 * 600, 0,
+                                powLimit, nHalfLife);
+    next_nBits = nextTarget.GetCompact();
+    BOOST_CHECK(next_nBits == powLimit_nBits);
+
+    // We also need to watch for underflows on nextTarget. We need to
+    // withstand an extra ~446 days' worth of blocks. This should bring a
+    // powLimit target down to the minimum target of 1.
+    nextTarget = CalculateASERT(powLimit, params.nPowTargetSpacing, 0,
+                                2 * (256 - 33) * 144, powLimit, nHalfLife);
+    next_nBits = nextTarget.GetCompact();
+    BOOST_CHECK_EQUAL(next_nBits, arith_uint256(1).GetCompact());
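+
+    // Arithmetic behind the numbers above: with nTimeDiff = 0, every
+    // 2 * 144 blocks of height difference puts us two days (one half-life)
+    // behind schedule, i.e. one halving. powLimit is just under 2^224, so
+    // 2 * (256 - 33) * 144 = 64224 blocks (~446 days at 144 blocks/day)
+    // applies slightly more than 223 halvings, which takes the target all
+    // the way down to the minimum of 1.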
+
+    // Define a structure holding parameters to pass to CalculateASERT.
+    // We are going to check some expected results against a vector of
+    // possible arguments.
+    struct calc_params {
+        arith_uint256 refTarget;
+        int64_t targetSpacing;
+        int64_t timeDiff;
+        int64_t heightDiff;
+        arith_uint256 expectedTarget;
+        uint32_t expectednBits;
+    };
+
+    // Define some named input argument values.
+    const arith_uint256 SINGLE_300_TARGET{
+        "00000000ffb1ffffffffffffffffffffffffffffffffffffffffffffffffffff"};
+    const arith_uint256 FUNNY_REF_TARGET{
+        "000000008000000000000000000fffffffffffffffffffffffffffffffffffff"};
+
+    // Define our expected input and output values.
+    // The timeDiff entries exclude parent_time_diff -- it is added in the
+    // call to CalculateASERT in the test loop.
+    const std::vector<calc_params> calculate_args = {
+
+        /* refTarget, targetSpacing, timeDiff, heightDiff, expectedTarget,
+           expectednBits */
+
+        {powLimit, 600, 0, 2 * 144, powLimit >> 1, 0x1c7fffff},
+        {powLimit, 600, 0, 4 * 144, powLimit >> 2, 0x1c3fffff},
+        {powLimit >> 1, 600, 0, 2 * 144, powLimit >> 2, 0x1c3fffff},
+        {powLimit >> 2, 600, 0, 2 * 144, powLimit >> 3, 0x1c1fffff},
+        {powLimit >> 3, 600, 0, 2 * 144, powLimit >> 4, 0x1c0fffff},
+        {powLimit, 600, 0, 2 * (256 - 34) * 144, 3, 0x01030000},
+        {powLimit, 600, 0, 2 * (256 - 34) * 144 + 119, 3, 0x01030000},
+        {powLimit, 600, 0, 2 * (256 - 34) * 144 + 120, 2, 0x01020000},
+        {powLimit, 600, 0, 2 * (256 - 33) * 144 - 1, 2, 0x01020000},
+        // 1 bit less since we do not need to shift to 0
+        {powLimit, 600, 0, 2 * (256 - 33) * 144, 1, 0x01010000},
+        // more will not decrease below 1
+        {powLimit, 600, 0, 2 * (256 - 32) * 144, 1, 0x01010000},
+        {1, 600, 0, 2 * (256 - 32) * 144, 1, 0x01010000},
+        {powLimit, 600, 2 * (512 - 32) * 144, 0, powLimit, powLimit_nBits},
+        {1, 600, (512 - 64) * 144 * 600, 0, powLimit, powLimit_nBits},
+        // clamps to powLimit
+        {powLimit, 600, 300, 1, SINGLE_300_TARGET, 0x1d00ffb1},
+        // confuses any attempt to detect overflow by inspecting the result
+        {FUNNY_REF_TARGET, 600, 600 * 2 * 33 * 144, 0, powLimit,
+         powLimit_nBits},
+    };
+
+    for (auto &v : calculate_args) {
+        nextTarget = CalculateASERT(v.refTarget, v.targetSpacing,
+                                    parent_time_diff + v.timeDiff,
+                                    v.heightDiff, powLimit, nHalfLife);
+        next_nBits = nextTarget.GetCompact();
+        const auto failMsg =
+            StrPrintCalcArgs(v.refTarget, v.targetSpacing,
+                             parent_time_diff + v.timeDiff, v.heightDiff,
+                             v.expectedTarget, v.expectednBits) +
+            strprintf("nextTarget= %s\nnext nBits= 0x%08x\n",
+                      nextTarget.ToString(), next_nBits);
+        BOOST_CHECK_MESSAGE(nextTarget == v.expectedTarget &&
+                                next_nBits == v.expectednBits,
+                            failMsg);
+    }
+}
+
+class ChainParamsWithDAAActivation : public CChainParams {
+public:
+    ChainParamsWithDAAActivation(const CChainParams &chainParams,
+                                 int daaHeight)
+        : CChainParams(chainParams) {
+        consensus.daaHeight = daaHeight;
+    }
+};
+
+/**
+ * Test the transition from cw144 to the ASERT algorithm, which involves the
+ * selection of an anchor block.
+ */
+BOOST_AUTO_TEST_CASE(asert_activation_anchor_test) {
+    // Make custom chain params based on mainnet, activating the cw144 DAA
+    // at a lower height than usual, so that we don't need to waste time
+    // making a 504000-block-long chain.
+    const auto mainChainParams = CreateChainParams(CBaseChainParams::MAIN);
+    const ChainParamsWithDAAActivation chainParams(*mainChainParams, 2016);
+    const Consensus::Params &params = chainParams.GetConsensus();
+
+    const int64_t activationTime =
+        gArgs.GetArg("-axionactivationtime", params.axionActivationTime);
+    CBlockHeader blkHeaderDummy;
+
+    // An arbitrary compact target for our chain (based on the BCH chain
+    // around Aug 10, 2020).
+    uint32_t initialBits = 0x1802a842;
+
+    // Block store for anonymous blocks; it needs to be big enough to fit
+    // all of the blocks generated in this test.
+    std::vector<CBlockIndex> blocks(10000);
+    int bidx = 1;
+
+    // Genesis block.
+    blocks[0].nHeight = 0;
+    blocks[0].nTime = 1269211443;
+    blocks[0].nBits = initialBits;
+    blocks[0].nChainWork = GetBlockProof(blocks[0]);
+
+    // Pile up a random number of blocks to establish some history of random
+    // height. The cw144 DAA requires us to have a height of at least 2016.
+    const int initialBlockCount = 2000 + int(InsecureRandRange(1000));
+    for (int i = 1; i < initialBlockCount; i++) {
+        blocks[bidx] = GetBlockIndex(&blocks[bidx - 1], 600, initialBits);
+        bidx++;
+        BOOST_REQUIRE(bidx < int(blocks.size()));
+    }
+
+    // Start making blocks prior to activation. First, make a block about
+    // 1 day before activation. Then put down 145 more blocks with a
+    // 500-second solvetime each, such that the MTP on the final block is
+    // 1 second short of activationTime.
+    {
+        blocks[bidx] = GetBlockIndex(&blocks[bidx - 1], 600, initialBits);
+        blocks[bidx].nTime = activationTime - 140 * 500 - 1;
+        bidx++;
+    }
+    for (int i = 0; i < 145; i++) {
+        BOOST_REQUIRE(bidx < int(blocks.size()));
+        blocks[bidx] = GetBlockIndex(&blocks[bidx - 1], 500, initialBits);
+        bidx++;
+    }
+    CBlockIndex *pindexPreActivation = &blocks[bidx - 1];
+    BOOST_CHECK_EQUAL(pindexPreActivation->nTime,
+                      activationTime + 5 * 500 - 1);
+    BOOST_CHECK_EQUAL(pindexPreActivation->GetMedianTimePast(),
+                      activationTime - 1);
+    BOOST_CHECK(IsDAAEnabled(params, pindexPreActivation));
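+
+    // Why those two checks line up: GetMedianTimePast() is the median of
+    // the last 11 block times. With a steady 500-second spacing that median
+    // is the timestamp five blocks back from the tip, so setting the tip to
+    // activationTime + 5 * 500 - 1 puts the MTP at activationTime - 1, one
+    // second short of activation.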
+
+    // If we consult the DAA, then it uses cw144, which returns a
+    // significantly lower target, because we have been mining too fast by a
+    // ratio of 600/500 for a whole day.
+    BOOST_CHECK(!IsAxionEnabled(params, pindexPreActivation));
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(pindexPreActivation, &blkHeaderDummy, chainParams),
+        0x180236e1);
+
+    /**
+     * Now we'll try adding on blocks to activate ASERT. The activation
+     * block is going to be our anchor block. We will make several distinct
+     * anchor blocks.
+     */
+
+    // Create an activating block with the expected solvetime, taking the
+    // cw144 difficulty we just saw. Since the solvetime is as expected, the
+    // next target is unchanged.
+    CBlockIndex indexActivation0 =
+        GetBlockIndex(pindexPreActivation, 600, 0x180236e1);
+    BOOST_CHECK(IsAxionEnabled(params, &indexActivation0));
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation0, &blkHeaderDummy, chainParams),
+        0x180236e1);
+    // The second call will have used the anchor cache and shouldn't change
+    // anything.
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation0, &blkHeaderDummy, chainParams),
+        0x180236e1);
+
+    // Now we'll generate some more activations/anchors, using unique
+    // targets for each one (if the algorithm gets confused between
+    // different anchors, we will know).
+
+    // Create an activating block with a 0 solvetime, which will drop the
+    // target by ~415/416.
+    CBlockIndex indexActivation1 =
+        GetBlockIndex(pindexPreActivation, 0, 0x18023456);
+    BOOST_CHECK(IsAxionEnabled(params, &indexActivation1));
+    // The cache will be stale here, and we should get the right result
+    // regardless:
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation1, &blkHeaderDummy, chainParams),
+        0x180232fd);
+    // The second call will have used the anchor cache and shouldn't change
+    // anything.
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation1, &blkHeaderDummy, chainParams),
+        0x180232fd);
+    // For good measure, try again with a wiped cache.
+    ResetASERTAnchorBlockCache();
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation1, &blkHeaderDummy, chainParams),
+        0x180232fd);
+
+    // Try an activation with the expected solvetime, which will keep the
+    // target the same.
+    uint32_t anchorBits2 = 0x180210fe;
+    CBlockIndex indexActivation2 =
+        GetBlockIndex(pindexPreActivation, 600, anchorBits2);
+    BOOST_CHECK(IsAxionEnabled(params, &indexActivation2));
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation2, &blkHeaderDummy, chainParams),
+        anchorBits2);
+
+    // Try a three-month solvetime, which will cause us to hit powLimit.
+    uint32_t anchorBits3 = 0x18034567;
+    CBlockIndex indexActivation3 =
+        GetBlockIndex(pindexPreActivation, 86400 * 90, anchorBits3);
+    BOOST_CHECK(IsAxionEnabled(params, &indexActivation3));
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation3, &blkHeaderDummy, chainParams),
+        0x1d00ffff);
+    // If the next block jumps back in time, we get back our original
+    // difficulty level.
+    CBlockIndex indexActivation3_return =
+        GetBlockIndex(&indexActivation3, -86400 * 90 + 2 * 600, anchorBits3);
+    BOOST_CHECK_EQUAL(GetNextWorkRequired(&indexActivation3_return,
+                                          &blkHeaderDummy, chainParams),
+                      anchorBits3);
+    // Retry to exercise the cache.
+    BOOST_CHECK_EQUAL(GetNextWorkRequired(&indexActivation3_return,
+                                          &blkHeaderDummy, chainParams),
+                      anchorBits3);
+
+    // Make an activation with MTP == activationTime exactly. This is a
+    // backwards timestamp jump, so the resulting target is 1.2% lower.
+    CBlockIndex indexActivation4 =
+        GetBlockIndex(pindexPreActivation, 0, 0x18011111);
+    indexActivation4.nTime = activationTime;
+    BOOST_CHECK_EQUAL(indexActivation4.GetMedianTimePast(), activationTime);
+    BOOST_CHECK(IsAxionEnabled(params, &indexActivation4));
+    BOOST_CHECK_EQUAL(
+        GetNextWorkRequired(&indexActivation4, &blkHeaderDummy, chainParams),
+        0x18010db3);
+
+    // Finally, create a random chain on top of our second activation, using
+    // ASERT targets all the way. Erase the cache so that this will do a
+    // fresh search for the anchor at every step (fortunately this is not
+    // too slow, thanks to the skiplist traversal).
+    CBlockIndex *pindexChain2 = &indexActivation2;
+    for (int i = 1; i < 1000; i++) {
+        BOOST_REQUIRE(bidx < int(blocks.size()));
+        ResetASERTAnchorBlockCache();
+        uint32_t nextBits =
+            GetNextWorkRequired(pindexChain2, &blkHeaderDummy, chainParams);
+        blocks[bidx] =
+            GetBlockIndex(pindexChain2, InsecureRandRange(1200), nextBits);
+        pindexChain2 = &blocks[bidx++];
+    }
+    // Scan back down to make sure all targets are the same when we keep the
+    // cached anchor.
+    for (CBlockIndex *pindex = pindexChain2; pindex != &indexActivation2;
+         pindex = pindex->pprev) {
+        uint32_t nextBits =
+            GetNextWorkRequired(pindex->pprev, &blkHeaderDummy, chainParams);
+        BOOST_CHECK_EQUAL(nextBits, pindex->nBits);
+    }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/validation.cpp b/src/validation.cpp
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -24,6 +24,7 @@
 #include <policy/mempool.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
+#include <pow/aserti32d.h> // For ResetASERTAnchorBlockCache
 #include <pow/pow.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
@@ -4860,6 +4861,7 @@
     pindexBestHeader = nullptr;
     pindexBestForkTip = nullptr;
     pindexBestForkBase = nullptr;
+    ResetASERTAnchorBlockCache();
     g_mempool.clear();
     mapBlocksUnlinked.clear();
     vinfoBlockFile.clear();