diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 38dc4836e..be46ff752 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,676 +1,677 @@
 # Copyright (c) 2017 The Bitcoin developers
 
 project(bitcoind)
 
 set(CMAKE_CXX_STANDARD 14)
 
 # Default visibility is hidden on all targets.
 set(CMAKE_C_VISIBILITY_PRESET hidden)
 set(CMAKE_CXX_VISIBILITY_PRESET hidden)
 
 option(BUILD_BITCOIN_WALLET "Activate the wallet functionality" ON)
 option(BUILD_BITCOIN_ZMQ "Activate the ZeroMQ functionalities" ON)
 option(BUILD_BITCOIN_CLI "Build bitcoin-cli" ON)
 option(BUILD_BITCOIN_TX "Build bitcoin-tx" ON)
 option(BUILD_BITCOIN_QT "Build bitcoin-qt" ON)
 option(BUILD_BITCOIN_SEEDER "Build bitcoin-seeder" ON)
 option(BUILD_LIBBITCOINCONSENSUS "Build the bitcoinconsensus shared library" ON)
 option(ENABLE_BIP70 "Enable BIP70 (payment protocol) support in GUI" ON)
 option(ENABLE_HARDENING "Harden the executables" ON)
 option(ENABLE_REDUCE_EXPORTS "Reduce the amount of exported symbols" OFF)
 option(ENABLE_STATIC_LIBSTDCXX "Statically link libstdc++" OFF)
 option(ENABLE_GLIBC_BACK_COMPAT "Enable Glibc compatibility features" OFF)
 option(ENABLE_QRCODE "Enable QR code display" ON)
 option(ENABLE_UPNP "Enable UPnP support" ON)
 option(START_WITH_UPNP "Make UPnP the default to map ports" OFF)
 option(ENABLE_CLANG_TIDY "Enable clang-tidy checks for Bitcoin ABC" OFF)
 option(ENABLE_PROFILING "Select the profiling tool to use" OFF)
 option(USE_LD_GOLD "Try to use gold as a linker if available" ON)
 
 set(OS_WITH_JEMALLOC_AS_SYSTEM_DEFAULT
 	"Android"
 	"FreeBSD"
 	"NetBSD"
 )
 if(NOT CMAKE_SYSTEM_NAME IN_LIST OS_WITH_JEMALLOC_AS_SYSTEM_DEFAULT)
 	set(USE_JEMALLOC_DEFAULT ON)
 endif()
 
 # FIXME: Building against jemalloc causes the software to segfault on OSX.
 # See https://github.com/Bitcoin-ABC/bitcoin-abc/issues/401
 if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING)
 	set(USE_JEMALLOC_DEFAULT OFF)
 endif()
 
 option(USE_JEMALLOC "Use jemalloc as an allocation library" ${USE_JEMALLOC_DEFAULT})
 
 if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 	set(DEFAULT_ENABLE_DBUS_NOTIFICATIONS ON)
 endif()
 option(ENABLE_DBUS_NOTIFICATIONS "Enable DBus desktop notifications. Linux only." ${DEFAULT_ENABLE_DBUS_NOTIFICATIONS})
 
 # If ccache is available, then use it.
 find_program(CCACHE ccache)
 if(CCACHE)
 	message(STATUS "Using ccache: ${CCACHE}")
 	set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE})
 	set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE})
 endif()
 
 # Disable what we do not need for the native build.
 include(NativeExecutable)
 native_add_cmake_flags(
 	"-DBUILD_BITCOIN_WALLET=OFF"
 	"-DBUILD_BITCOIN_QT=OFF"
 	"-DBUILD_BITCOIN_ZMQ=OFF"
 	"-DENABLE_QRCODE=OFF"
 	"-DENABLE_UPNP=OFF"
 	"-DUSE_JEMALLOC=OFF"
 	"-DENABLE_CLANG_TIDY=OFF"
 	"-DENABLE_BIP70=OFF"
 )
 
 if(ENABLE_CLANG_TIDY)
 	include(ClangTidy)
 endif()
 
 if(ENABLE_SANITIZERS)
 	include(Sanitizers)
 	enable_sanitizers(${ENABLE_SANITIZERS})
 endif()
 
 include(AddCompilerFlags)
 
 if(USE_LD_GOLD)
 	add_linker_flags(-fuse-ld=gold)
 endif()
 
 # Prefer -g3, defaults to -g if unavailable
 foreach(LANGUAGE C CXX)
 	set(COMPILER_DEBUG_LEVEL -g)
 	check_compiler_flags(G3_IS_SUPPORTED ${LANGUAGE} -g3)
 	if(${G3_IS_SUPPORTED})
 		set(COMPILER_DEBUG_LEVEL -g3)
 	endif()
 	add_compile_options_to_configuration_for_language(Debug ${LANGUAGE} ${COMPILER_DEBUG_LEVEL})
 endforeach()
 
 # Define the debugging symbols DEBUG and DEBUG_LOCKORDER when the Debug build
 # type is selected.
 add_compile_definitions_to_configuration(Debug DEBUG DEBUG_LOCKORDER)
 
 # Add -ftrapv when building in Debug
 add_compile_options_to_configuration(Debug -ftrapv)
 
 # All versions of gcc that we commonly use for building are subject to bug
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set
 # -fstack-reuse=none for all gcc builds. (Only gcc understands this flag)
 if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
 	add_compiler_flags(-fstack-reuse=none)
 endif()
 
 if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 	# Ensure that WINDRES_PREPROC is enabled when using windres.
 	list(APPEND CMAKE_RC_FLAGS "-DWINDRES_PREPROC")
 
 	# Build all static so there is no dll file to distribute.
 	add_linker_flags(-static)
 
 	add_compile_definitions(
 		# Windows 7
 		_WIN32_WINNT=0x0601
 		# Internet Explorer 5.01 (!)
 		_WIN32_IE=0x0501
 		# Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE,
 		# RPC, Shell, and Windows Sockets.
 		WIN32_LEAN_AND_MEAN
 	)
 endif()
 
 if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
 	add_compile_definitions(MAC_OSX OBJC_OLD_DISPATCH_PROTOTYPES=0)
 	add_linker_flags(-Wl,-dead_strip_dylibs)
 endif()
 
 if(ENABLE_REDUCE_EXPORTS)
 	# Default visibility is set by CMAKE_<LANG>_VISIBILITY_PRESET, but this
 	# doesn't tell whether the visibility setting is effective.
 	# Check if the flag -fvisibility=hidden is supported, as using the hidden
 	# visibility is a requirement to reduce exports.
 	check_compiler_flags(HAS_CXX_FVISIBILITY CXX -fvisibility=hidden)
 	if(NOT HAS_CXX_FVISIBILITY)
 		message(FATAL_ERROR "Cannot set default symbol visibility. Use -DENABLE_REDUCE_EXPORTS=OFF.")
 	endif()
 
 	# Also hide symbols from static libraries
 	add_linker_flags(-Wl,--exclude-libs,libstdc++)
 endif()
 
 # Enable statically linking libstdc++
 if(ENABLE_STATIC_LIBSTDCXX)
 	add_linker_flags(-static-libstdc++)
 endif()
 
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 
 if(ENABLE_HARDENING)
 	# Enable stack protection
 	add_cxx_compiler_flags(-fstack-protector-all -Wstack-protector)
 
 	# Enable some buffer overflow checking, except in -O0 builds, which
 	# do not support it
 	add_compiler_flags(-U_FORTIFY_SOURCE)
 	add_compile_options($<$<NOT:$<CONFIG:Debug>>:-D_FORTIFY_SOURCE=2>)
 
 	# Enable ASLR (these flags are primarily targeting MinGW)
 	add_linker_flags(-Wl,--dynamicbase -Wl,--nxcompat -Wl,--high-entropy-va)
 
 	# Make the relocated sections read-only
 	add_linker_flags(-Wl,-z,relro -Wl,-z,now)
 
 	# CMake provides the POSITION_INDEPENDENT_CODE property to set PIC/PIE.
 	cmake_policy(SET CMP0083 NEW)
 	include(CheckPIESupported)
 	check_pie_supported()
 
 	if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 		# MinGW provides its own libssp for stack smashing protection
 		link_libraries(ssp)
 	endif()
 endif()
 
 if(ENABLE_PROFILING MATCHES "gprof")
 	message(STATUS "Enable profiling with gprof")
 
 	# -pg is incompatible with -pie. Since hardening and profiling together
 	# doesn't make sense, we simply make them mutually exclusive here.
 	# Additionally, hardened toolchains may force -pie by default, in which
 	# case it needs to be turned off with -no-pie.
 	if(ENABLE_HARDENING)
 		message(FATAL_ERROR "Profiling with gprof requires disabling hardening with -DENABLE_HARDENING=OFF.")
 	endif()
 	add_linker_flags(-no-pie)
 
 	add_compiler_flags(-pg)
 	add_linker_flags(-pg)
 endif()
 
 # Enable warnings
 add_c_compiler_flags(-Wnested-externs -Wstrict-prototypes)
 add_compiler_flags(
 	-Wall
 	-Wextra
 	-Wformat
 	-Wvla
 	-Wcast-align
 	-Wunused-parameter
 	-Wmissing-braces
 	-Wthread-safety
 	-Wshadow
 	-Wshadow-field
 	-Wrange-loop-analysis
 	-Wredundant-decls
 )
 add_compiler_flag_group(-Wformat -Wformat-security)
 add_cxx_compiler_flags(
 	-Wredundant-move
 )
 
 option(EXTRA_WARNINGS "Enable extra warnings" OFF)
 if(EXTRA_WARNINGS)
 	add_cxx_compiler_flags(-Wsuggest-override)
 else()
 	add_compiler_flags(-Wno-unused-parameter)
 	add_compiler_flags(-Wno-implicit-fallthrough)
 endif()
 
 # libtool style configure
 add_subdirectory(config)
 
 # Enable LFS (Large File Support) on targets that don't have it natively.
 # This should be defined before the libraries are included, as leveldb needs the
 # definition to be set.
 if(NOT HAVE_LARGE_FILE_SUPPORT)
 	add_compile_definitions(_FILE_OFFSET_BITS=64)
 	add_linker_flags(-Wl,--large-address-aware)
 endif()
 
 if(ENABLE_GLIBC_BACK_COMPAT)
 	# Wrap some glibc functions with ours
 	add_linker_flags(-Wl,--wrap=__divmoddi4)
 	add_linker_flags(-Wl,--wrap=log2f)
 
 	if(NOT HAVE_LARGE_FILE_SUPPORT)
 		add_linker_flags(-Wl,--wrap=fcntl -Wl,--wrap=fcntl64)
 	endif()
 endif()
 
 if(USE_JEMALLOC)
 	# Most of the sanitizers require their instrumented allocation functions to
 	# be fully functional. This is obviously the case for all the memory related
 	# sanitizers (asan, lsan, msan), but it is not limited to them.
 	if(ENABLE_SANITIZERS)
 		message(WARNING "Jemalloc is incompatible with the sanitizers and has been disabled.")
 	else()
 		find_package(Jemalloc 3.6.0 REQUIRED)
 		link_libraries(Jemalloc::jemalloc)
 	endif()
 endif()
 
 
 # Make sure that all the global compiler and linker flags are set BEFORE
 # including the libraries so they apply as needed.
 
 
 # libraries
 add_subdirectory(crypto)
 add_subdirectory(leveldb)
 add_subdirectory(secp256k1)
 add_subdirectory(univalue)
 
 # Find the git root and return the full path to the .git/logs/HEAD file if
 # it exists.
 function(find_git_head_logs_file RESULT)
 	find_package(Git)
 	if(GIT_FOUND)
 		execute_process(
 			COMMAND "${GIT_EXECUTABLE}" "rev-parse" "--show-toplevel"
 			WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
 			OUTPUT_VARIABLE GIT_ROOT
 			RESULT_VARIABLE GIT_RESULT
 			OUTPUT_STRIP_TRAILING_WHITESPACE
 			ERROR_QUIET
 		)
 
 		if(GIT_RESULT EQUAL 0)
 			set(GIT_LOGS_DIR "${GIT_ROOT}/.git/logs")
 			set(GIT_HEAD_LOGS_FILE "${GIT_LOGS_DIR}/HEAD")
 			# If the .git/logs/HEAD does not exist, create it
 			if(NOT EXISTS "${GIT_HEAD_LOGS_FILE}")
 				file(MAKE_DIRECTORY "${GIT_LOGS_DIR}")
 				file(TOUCH "${GIT_HEAD_LOGS_FILE}")
 			endif()
 			set(${RESULT} "${GIT_HEAD_LOGS_FILE}" PARENT_SCOPE)
 		endif()
 	endif()
 endfunction()
 
 find_git_head_logs_file(GIT_HEAD_LOGS_FILE)
 
 set(OBJ_DIR "${CMAKE_CURRENT_BINARY_DIR}/obj")
 file(MAKE_DIRECTORY "${OBJ_DIR}")
 set(BUILD_HEADER "${OBJ_DIR}/build.h")
 set(BUILD_HEADER_TMP "${BUILD_HEADER}.tmp")
 
 add_custom_command(
 	DEPENDS
 		"${GIT_HEAD_LOGS_FILE}"
 		"${CMAKE_SOURCE_DIR}/share/genbuild.sh"
 	OUTPUT
 		"${BUILD_HEADER}"
 	COMMAND
 		"${CMAKE_SOURCE_DIR}/share/genbuild.sh"
 		"${BUILD_HEADER_TMP}"
 		"${CMAKE_SOURCE_DIR}"
 	COMMAND
 		${CMAKE_COMMAND} -E copy_if_different "${BUILD_HEADER_TMP}" "${BUILD_HEADER}"
 	COMMAND
 		${CMAKE_COMMAND} -E remove "${BUILD_HEADER_TMP}"
 )
 
 # Because the Bitcoin ABC source code is disorganised, we
 # end up with a bunch of libraries without any apparent
 # cohesive structure. This is inherited from Bitcoin Core,
 # and the build files reflect that.
 # TODO: Improve the structure once cmake is rocking.
 
 # Various completely unrelated features shared by all executables.
 add_library(util
 	chainparamsbase.cpp
 	clientversion.cpp
 	compat/glibcxx_sanity.cpp
 	compat/strnlen.cpp
 	fs.cpp
 	interfaces/handler.cpp
 	logging.cpp
 	random.cpp
 	randomenv.cpp
 	rcu.cpp
 	rpc/request.cpp
+	blockdb.cpp
 	support/cleanse.cpp
 	support/lockedpool.cpp
 	sync.cpp
 	threadinterrupt.cpp
 	uint256.cpp
 	util/bip32.cpp
 	util/bytevectorhash.cpp
 	util/error.cpp
 	util/moneystr.cpp
 	util/settings.cpp
 	util/spanparsing.cpp
 	util/strencodings.cpp
 	util/string.cpp
 	util/system.cpp
 	util/threadnames.cpp
 	util/time.cpp
 	util/url.cpp
 	util/validation.cpp
 
 	# obj/build.h
 	"${BUILD_HEADER}"
 )
 
 target_compile_definitions(util PUBLIC HAVE_CONFIG_H HAVE_BUILD_INFO)
 target_include_directories(util
 	PUBLIC
 		.
 		# To access the config/ and obj/ directories
 		${CMAKE_CURRENT_BINARY_DIR}
 )
 
 if(ENABLE_GLIBC_BACK_COMPAT)
 	target_sources(util PRIVATE compat/glibc_compat.cpp)
 endif()
 
 # Target specific configs
 if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 	set(Boost_USE_STATIC_LIBS ON)
 	set(Boost_USE_STATIC_RUNTIME ON)
 	set(Boost_THREADAPI win32)
 
 	find_package(SHLWAPI REQUIRED)
 	target_link_libraries(util SHLWAPI::shlwapi)
 
 	find_library(WS2_32_LIBRARY NAMES ws2_32)
 	target_link_libraries(util ${WS2_32_LIBRARY})
 
 	target_compile_definitions(util PUBLIC BOOST_THREAD_USE_LIB)
 endif()
 
 target_link_libraries(util univalue crypto)
 
 macro(link_event TARGET)
 	non_native_target_link_libraries(${TARGET} Event 2.0.22 ${ARGN})
 endmacro()
 
 link_event(util event)
 
 macro(link_boost TARGET)
 	non_native_target_link_libraries(${TARGET} Boost 1.59 ${ARGN})
 endmacro()
 
 link_boost(util filesystem thread)
 # Make sure boost uses std::atomic (it doesn't before 1.63)
 target_compile_definitions(util PUBLIC BOOST_SP_USE_STD_ATOMIC BOOST_AC_USE_STD_ATOMIC)
 
 # More completely unrelated features shared by all executables.
 # Because nothing conveys how this is different from util better than "common"
 add_library(common
 	amount.cpp
 	base58.cpp
 	bloom.cpp
 	cashaddr.cpp
 	cashaddrenc.cpp
 	chainparams.cpp
 	config.cpp
 	consensus/merkle.cpp
 	coins.cpp
 	compressor.cpp
 	eventloop.cpp
 	feerate.cpp
 	core_read.cpp
 	core_write.cpp
 	key.cpp
 	key_io.cpp
 	merkleblock.cpp
 	net_permissions.cpp
 	netaddress.cpp
 	netbase.cpp
 	outputtype.cpp
 	policy/policy.cpp
 	primitives/block.cpp
 	protocol.cpp
 	psbt.cpp
 	rpc/rawtransaction_util.cpp
 	rpc/util.cpp
 	scheduler.cpp
 	salteduint256hasher.cpp
 	versionbitsinfo.cpp
 	warnings.cpp
 )
 
 target_link_libraries(common util secp256k1 script)
 
 # script library
 add_library(script
 	script/bitfield.cpp
 	script/descriptor.cpp
 	script/interpreter.cpp
 	script/script.cpp
 	script/script_error.cpp
 	script/sigencoding.cpp
 	script/sign.cpp
 	script/signingprovider.cpp
 	script/standard.cpp
 )
 
 target_link_libraries(script common)
 
 # libbitcoinconsensus
 add_library(bitcoinconsensus
 	arith_uint256.cpp
 	hash.cpp
 	primitives/transaction.cpp
 	pubkey.cpp
 	uint256.cpp
 	util/strencodings.cpp
 	consensus/tx_check.cpp
 )
 
 target_link_libraries(bitcoinconsensus script)
 
 include(InstallationHelper)
 if(BUILD_LIBBITCOINCONSENSUS)
 	target_compile_definitions(bitcoinconsensus
 		PUBLIC
 			BUILD_BITCOIN_INTERNAL
 			HAVE_CONSENSUS_LIB
 	)
 
 	install_shared_library(bitcoinconsensus
 		script/bitcoinconsensus.cpp
 		PUBLIC_HEADER script/bitcoinconsensus.h
 	)
 endif()
 
 # Bitcoin server facilities
 add_library(server
 	addrdb.cpp
 	addrman.cpp
 	avalanche/peermanager.cpp
 	avalanche/processor.cpp
 	avalanche/proof.cpp
 	avalanche/proofbuilder.cpp
 	banman.cpp
 	blockencodings.cpp
 	blockfilter.cpp
 	blockindex.cpp
 	chain.cpp
 	checkpoints.cpp
 	config.cpp
 	consensus/activation.cpp
 	consensus/tx_verify.cpp
 	dbwrapper.cpp
 	flatfile.cpp
 	httprpc.cpp
 	httpserver.cpp
 	index/base.cpp
 	index/blockfilterindex.cpp
 	index/txindex.cpp
 	init.cpp
 	interfaces/chain.cpp
 	interfaces/node.cpp
 	miner.cpp
 	minerfund.cpp
 	net.cpp
 	net_processing.cpp
 	node/coin.cpp
 	node/coinstats.cpp
 	node/context.cpp
 	node/psbt.cpp
 	node/transaction.cpp
 	noui.cpp
 	policy/fees.cpp
 	policy/settings.cpp
 	pow/aserti32d.cpp
 	pow/daa.cpp
 	pow/eda.cpp
 	pow/pow.cpp
 	rest.cpp
 	rpc/abc.cpp
 	rpc/avalanche.cpp
 	rpc/blockchain.cpp
 	rpc/command.cpp
 	rpc/mining.cpp
 	rpc/misc.cpp
 	rpc/net.cpp
 	rpc/rawtransaction.cpp
 	rpc/server.cpp
 	script/scriptcache.cpp
 	script/sigcache.cpp
 	shutdown.cpp
 	timedata.cpp
 	torcontrol.cpp
 	txdb.cpp
 	txmempool.cpp
 	ui_interface.cpp
 	validation.cpp
 	validationinterface.cpp
 	versionbits.cpp
 )
 
 target_include_directories(server PRIVATE leveldb/helpers/memenv)
 
 target_link_libraries(server
 	bitcoinconsensus
 	leveldb
 	memenv
 )
 
 link_event(server event)
 if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 	link_event(server pthreads)
 endif()
 
 if(ENABLE_UPNP)
 	find_package(MiniUPnPc 1.5 REQUIRED)
 	target_link_libraries(server MiniUPnPc::miniupnpc)
 
 	if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
 		# TODO: check if we are really using a static library. Assume this is
 		# the one from the depends for now since the native windows build is not
 		# supported.
 		target_compile_definitions(server
 			PUBLIC -DSTATICLIB
 			PUBLIC -DMINIUPNP_STATICLIB
 		)
 	endif()
 endif()
 
 # Test suites.
 add_subdirectory(test)
 add_subdirectory(avalanche/test)
 add_subdirectory(pow/test)
 
 # Benchmark suite.
 add_subdirectory(bench)
 
 include(BinaryTest)
 include(WindowsVersionInfo)
 
 # Wallet
 if(BUILD_BITCOIN_WALLET)
 	add_subdirectory(wallet)
 	target_link_libraries(server wallet)
 
 	# bitcoin-wallet
 	add_executable(bitcoin-wallet bitcoin-wallet.cpp)
 	generate_windows_version_info(bitcoin-wallet
 		DESCRIPTION "CLI tool for ${PACKAGE_NAME} wallets"
 	)
 
 	target_link_libraries(bitcoin-wallet wallet-tool common util)
 
 	add_to_symbols_check(bitcoin-wallet)
 	add_to_security_check(bitcoin-wallet)
 
 	install_target(bitcoin-wallet)
 	install_manpages(bitcoin-wallet)
 else()
 	target_sources(server PRIVATE dummywallet.cpp)
 endif()
 
 # ZeroMQ
 if(BUILD_BITCOIN_ZMQ)
 	add_subdirectory(zmq)
 	target_link_libraries(server zmq)
 endif()
 
 # RPC client support
 add_library(rpcclient rpc/client.cpp)
 target_link_libraries(rpcclient univalue util)
 
 # bitcoin-seeder
 if(BUILD_BITCOIN_SEEDER)
 	add_subdirectory(seeder)
 endif()
 
 # bitcoin-cli
 if(BUILD_BITCOIN_CLI)
 	add_executable(bitcoin-cli bitcoin-cli.cpp)
 	generate_windows_version_info(bitcoin-cli
 		DESCRIPTION "JSON-RPC client for ${PACKAGE_NAME}"
 	)
 
 	target_link_libraries(bitcoin-cli common rpcclient)
 	link_event(bitcoin-cli event)
 
 	add_to_symbols_check(bitcoin-cli)
 	add_to_security_check(bitcoin-cli)
 
 	install_target(bitcoin-cli)
 	install_manpages(bitcoin-cli)
 endif()
 
 # bitcoin-tx
 if(BUILD_BITCOIN_TX)
 	add_executable(bitcoin-tx bitcoin-tx.cpp)
 	generate_windows_version_info(bitcoin-tx
 		DESCRIPTION "CLI Bitcoin transaction editor utility"
 	)
 
 	target_link_libraries(bitcoin-tx bitcoinconsensus)
 
 	add_to_symbols_check(bitcoin-tx)
 	add_to_security_check(bitcoin-tx)
 
 	install_target(bitcoin-tx)
 	install_manpages(bitcoin-tx)
 endif()
 
 # bitcoind
 add_executable(bitcoind bitcoind.cpp)
 target_link_libraries(bitcoind server)
 generate_windows_version_info(bitcoind
 	DESCRIPTION "Bitcoin node with a JSON-RPC server"
 )
 add_to_symbols_check(bitcoind)
 add_to_security_check(bitcoind)
 
 install_target(bitcoind)
 install_manpages(bitcoind)
 
 # Bitcoin-qt
 if(BUILD_BITCOIN_QT)
 	add_subdirectory(qt)
 endif()
diff --git a/src/blockdb.cpp b/src/blockdb.cpp
new file mode 100644
index 000000000..f96f59a60
--- /dev/null
+++ b/src/blockdb.cpp
@@ -0,0 +1,80 @@
+#include <blockdb.h>
+
+#include <blockindex.h>
+#include <clientversion.h>
+#include <pow/pow.h>
+#include <primitives/block.h>
+#include <streams.h>
+#include <util/system.h>
+
+extern RecursiveMutex cs_main;
+
+FlatFileSeq BlockFileSeq() {
+    return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
+}
+
+FlatFileSeq UndoFileSeq() {
+    return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
+}
+
+FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
+    return BlockFileSeq().Open(pos, fReadOnly);
+}
+
+/** Open an undo file (rev?????.dat) */
+FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
+    return UndoFileSeq().Open(pos, fReadOnly);
+}
+
+fs::path GetBlockPosFilename(const FlatFilePos &pos) {
+    return BlockFileSeq().FileName(pos);
+}
+
+bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
+                       const Consensus::Params &params) {
+    block.SetNull();
+
+    // Open history file to read
+    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
+    if (filein.IsNull()) {
+        return error("ReadBlockFromDisk: OpenBlockFile failed for %s",
+                     pos.ToString());
+    }
+
+    // Read block
+    try {
+        filein >> block;
+    } catch (const std::exception &e) {
+        return error("%s: Deserialize or I/O error - %s at %s", __func__,
+                     e.what(), pos.ToString());
+    }
+
+    // Check the header
+    if (!CheckProofOfWork(block.GetHash(), block.nBits, params)) {
+        return error("ReadBlockFromDisk: Errors in block header at %s",
+                     pos.ToString());
+    }
+
+    return true;
+}
+
+bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
+                       const Consensus::Params &params) {
+    FlatFilePos blockPos;
+    {
+        LOCK(cs_main);
+        blockPos = pindex->GetBlockPos();
+    }
+
+    if (!ReadBlockFromDisk(block, blockPos, params)) {
+        return false;
+    }
+
+    if (block.GetHash() != pindex->GetBlockHash()) {
+        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() "
+                     "doesn't match index for %s at %s",
+                     pindex->ToString(), pindex->GetBlockPos().ToString());
+    }
+
+    return true;
+}
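
The file above only exercises the block-file half of the pair of file sequences it creates. As a hedged sketch of the undo-file half, the snippet below mirrors the CAutoFile pattern used in ReadBlockFromDisk(); CBlockUndo comes from undo.h, and the checksum verification performed by the real caller in validation.cpp is deliberately omitted, so treat this as illustrative only:

    // Hedged, illustrative sketch -- not part of this diff.
    #include <blockdb.h>
    #include <clientversion.h>
    #include <streams.h>
    #include <undo.h>
    #include <util/system.h>

    static bool ReadUndoFromDisk(CBlockUndo &blockundo, const FlatFilePos &pos) {
        CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
        if (filein.IsNull()) {
            return error("%s: OpenUndoFile failed for %s", __func__,
                         pos.ToString());
        }
        try {
            // The real reader also verifies a hash written next to the undo
            // data; that step is skipped in this sketch.
            filein >> blockundo;
        } catch (const std::exception &e) {
            return error("%s: Deserialize or I/O error - %s", __func__,
                         e.what());
        }
        return true;
    }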
diff --git a/src/blockdb.h b/src/blockdb.h
new file mode 100644
index 000000000..9db94d7ec
--- /dev/null
+++ b/src/blockdb.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2020 The Bitcoin developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_BLOCKDB_H
+#define BITCOIN_BLOCKDB_H
+
+#include <flatfile.h>
+
+namespace Consensus {
+struct Params;
+}
+
+class CBlock;
+class CBlockIndex;
+
+/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
+static constexpr unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
+/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
+static constexpr unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
+
+FlatFileSeq BlockFileSeq();
+FlatFileSeq UndoFileSeq();
+FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
+
+/**
+ * Translate a block file position to its filesystem path.
+ */
+fs::path GetBlockPosFilename(const FlatFilePos &pos);
+
+/**
+ * Open a block file (blk?????.dat).
+ */
+FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);
+
+/** Functions for disk access to blocks */
+bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
+                       const Consensus::Params &params);
+bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
+                       const Consensus::Params &params);
+
+#endif // BITCOIN_BLOCKDB_H
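
For reviewers, here is a hedged usage sketch of the API declared in this header, mirroring the call sites added to index/base.cpp further down in the diff. The Consensus::Params lookup through GetConfig() follows those callers; DumpBlockSummary itself is illustrative and not part of the change:

    // Hedged, illustrative sketch -- not part of this diff.
    #include <blockdb.h>
    #include <chain.h>
    #include <chainparams.h>
    #include <config.h>
    #include <logging.h>
    #include <primitives/block.h>

    static bool DumpBlockSummary(const CBlockIndex *pindex) {
        const Consensus::Params &params =
            GetConfig().GetChainParams().GetConsensus();

        CBlock block;
        // ReadBlockFromDisk() re-checks the header's proof of work and that
        // the block hash matches the index entry before returning true.
        if (!ReadBlockFromDisk(block, pindex, params)) {
            return false;
        }

        LogPrintf("Block %s contains %u transactions\n",
                  pindex->GetBlockHash().ToString(), block.vtx.size());
        return true;
    }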
diff --git a/src/index/base.cpp b/src/index/base.cpp
index b4380de65..cf7357b6d 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -1,328 +1,329 @@
 // Copyright (c) 2017-2018 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
+#include <blockdb.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <index/base.h>
 #include <shutdown.h>
 #include <tinyformat.h>
 #include <ui_interface.h>
 #include <util/system.h>
 #include <validation.h>
 #include <warnings.h>
 
 constexpr char DB_BEST_BLOCK = 'B';
 
 constexpr int64_t SYNC_LOG_INTERVAL = 30;           // seconds
 constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
 
 template <typename... Args>
 static void FatalError(const char *fmt, const Args &... args) {
     std::string strMessage = tfm::format(fmt, args...);
     SetMiscWarning(strMessage);
     LogPrintf("*** %s\n", strMessage);
     uiInterface.ThreadSafeMessageBox(
         "Error: A fatal internal error occurred, see debug.log for details", "",
         CClientUIInterface::MSG_ERROR);
     StartShutdown();
 }
 
 BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory,
                   bool f_wipe, bool f_obfuscate)
     : CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate) {}
 
 bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const {
     bool success = Read(DB_BEST_BLOCK, locator);
     if (!success) {
         locator.SetNull();
     }
     return success;
 }
 
 void BaseIndex::DB::WriteBestBlock(CDBBatch &batch,
                                    const CBlockLocator &locator) {
     batch.Write(DB_BEST_BLOCK, locator);
 }
 
 BaseIndex::~BaseIndex() {
     Interrupt();
     Stop();
 }
 
 bool BaseIndex::Init() {
     CBlockLocator locator;
     if (!GetDB().ReadBestBlock(locator)) {
         locator.SetNull();
     }
 
     LOCK(cs_main);
     if (locator.IsNull()) {
         m_best_block_index = nullptr;
     } else {
         m_best_block_index = FindForkInGlobalIndex(::ChainActive(), locator);
     }
     m_synced = m_best_block_index.load() == ::ChainActive().Tip();
     return true;
 }
 
 static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
 
     if (!pindex_prev) {
         return ::ChainActive().Genesis();
     }
 
     const CBlockIndex *pindex = ::ChainActive().Next(pindex_prev);
     if (pindex) {
         return pindex;
     }
 
     return ::ChainActive().Next(::ChainActive().FindFork(pindex_prev));
 }
 
 void BaseIndex::ThreadSync() {
     const CBlockIndex *pindex = m_best_block_index.load();
     if (!m_synced) {
         auto &consensus_params = GetConfig().GetChainParams().GetConsensus();
 
         int64_t last_log_time = 0;
         int64_t last_locator_write_time = 0;
         while (true) {
             if (m_interrupt) {
                 m_best_block_index = pindex;
                 // No need to handle errors in Commit. If it fails, the error
                 // will already be logged. The best way to recover is to
                 // continue, as the index cannot be corrupted by a missed
                 // commit to disk for an advanced index state.
                 Commit();
                 return;
             }
 
             {
                 LOCK(cs_main);
                 const CBlockIndex *pindex_next = NextSyncBlock(pindex);
                 if (!pindex_next) {
                     m_best_block_index = pindex;
                     m_synced = true;
                     // No need to handle errors in Commit. See rationale above.
                     Commit();
                     break;
                 }
                 if (pindex_next->pprev != pindex &&
                     !Rewind(pindex, pindex_next->pprev)) {
                     FatalError(
                         "%s: Failed to rewind index %s to a previous chain tip",
                         __func__, GetName());
                     return;
                 }
                 pindex = pindex_next;
             }
 
             int64_t current_time = GetTime();
             if (last_log_time + SYNC_LOG_INTERVAL < current_time) {
                 LogPrintf("Syncing %s with block chain from height %d\n",
                           GetName(), pindex->nHeight);
                 last_log_time = current_time;
             }
 
             if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL <
                 current_time) {
                 m_best_block_index = pindex;
                 last_locator_write_time = current_time;
                 // No need to handle errors in Commit. See rationale above.
                 Commit();
             }
 
             CBlock block;
             if (!ReadBlockFromDisk(block, pindex, consensus_params)) {
                 FatalError("%s: Failed to read block %s from disk", __func__,
                            pindex->GetBlockHash().ToString());
                 return;
             }
             if (!WriteBlock(block, pindex)) {
                 FatalError("%s: Failed to write block %s to index database",
                            __func__, pindex->GetBlockHash().ToString());
                 return;
             }
         }
     }
 
     if (pindex) {
         LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight);
     } else {
         LogPrintf("%s is enabled\n", GetName());
     }
 }
 
 bool BaseIndex::Commit() {
     CDBBatch batch(GetDB());
     if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) {
         return error("%s: Failed to commit latest %s state", __func__,
                      GetName());
     }
     return true;
 }
 
 bool BaseIndex::CommitInternal(CDBBatch &batch) {
     LOCK(cs_main);
     GetDB().WriteBestBlock(batch,
                            ::ChainActive().GetLocator(m_best_block_index));
     return true;
 }
 
 bool BaseIndex::Rewind(const CBlockIndex *current_tip,
                        const CBlockIndex *new_tip) {
     assert(current_tip == m_best_block_index);
     assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip);
 
     // In the case of a reorg, ensure persisted block locator is not stale.
     m_best_block_index = new_tip;
     if (!Commit()) {
         // If commit fails, revert the best block index to avoid corruption.
         m_best_block_index = current_tip;
         return false;
     }
 
     return true;
 }
 
 void BaseIndex::BlockConnected(
     const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex,
     const std::vector<CTransactionRef> &txn_conflicted) {
     if (!m_synced) {
         return;
     }
 
     const CBlockIndex *best_block_index = m_best_block_index.load();
     if (!best_block_index) {
         if (pindex->nHeight != 0) {
             FatalError("%s: First block connected is not the genesis block "
                        "(height=%d)",
                        __func__, pindex->nHeight);
             return;
         }
     } else {
         // Ensure block connects to an ancestor of the current best block. This
         // should be the case most of the time, but may not be immediately after
         // the sync thread catches up and sets m_synced. Consider the case
         // where there is a reorg and the blocks on the stale branch are in the
         // ValidationInterface queue backlog even after the sync thread has
         // caught up to the new chain tip. In this unlikely event, log a warning
         // and let the queue clear.
         if (best_block_index->GetAncestor(pindex->nHeight - 1) !=
             pindex->pprev) {
             LogPrintf("%s: WARNING: Block %s does not connect to an ancestor "
                       "of known best chain (tip=%s); not updating index\n",
                       __func__, pindex->GetBlockHash().ToString(),
                       best_block_index->GetBlockHash().ToString());
             return;
         }
         if (best_block_index != pindex->pprev &&
             !Rewind(best_block_index, pindex->pprev)) {
             FatalError("%s: Failed to rewind index %s to a previous chain tip",
                        __func__, GetName());
             return;
         }
     }
 
     if (WriteBlock(*block, pindex)) {
         m_best_block_index = pindex;
     } else {
         FatalError("%s: Failed to write block %s to index", __func__,
                    pindex->GetBlockHash().ToString());
         return;
     }
 }
 
 void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) {
     if (!m_synced) {
         return;
     }
 
     const BlockHash &locator_tip_hash = locator.vHave.front();
     const CBlockIndex *locator_tip_index;
     {
         LOCK(cs_main);
         locator_tip_index = LookupBlockIndex(locator_tip_hash);
     }
 
     if (!locator_tip_index) {
         FatalError("%s: First block (hash=%s) in locator was not found",
                    __func__, locator_tip_hash.ToString());
         return;
     }
 
     // This checks that ChainStateFlushed callbacks are received after
     // BlockConnected. The check may fail immediately after the sync thread
     // catches up and sets m_synced. Consider the case where there is a reorg
     // and the blocks on the stale branch are in the ValidationInterface queue
     // backlog even after the sync thread has caught up to the new chain tip. In
     // this unlikely event, log a warning and let the queue clear.
     const CBlockIndex *best_block_index = m_best_block_index.load();
     if (best_block_index->GetAncestor(locator_tip_index->nHeight) !=
         locator_tip_index) {
         LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known "
                   "best chain (tip=%s); not writing index locator\n",
                   __func__, locator_tip_hash.ToString(),
                   best_block_index->GetBlockHash().ToString());
         return;
     }
 
     // No need to handle errors in Commit. If it fails, the error will
     // already be logged. The best way to recover is to continue, as the
     // index cannot be corrupted by a missed commit to disk for an
     // advanced index state.
     Commit();
 }
 
 bool BaseIndex::BlockUntilSyncedToCurrentChain() {
     AssertLockNotHeld(cs_main);
 
     if (!m_synced) {
         return false;
     }
 
     {
         // Skip the queue-draining stuff if we know we're caught up with
         // ::ChainActive().Tip().
         LOCK(cs_main);
         const CBlockIndex *chain_tip = ::ChainActive().Tip();
         const CBlockIndex *best_block_index = m_best_block_index.load();
         if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) {
             return true;
         }
     }
 
     LogPrintf("%s: %s is catching up on block notifications\n", __func__,
               GetName());
     SyncWithValidationInterfaceQueue();
     return true;
 }
 
 void BaseIndex::Interrupt() {
     m_interrupt();
 }
 
 void BaseIndex::Start() {
     // Need to register this ValidationInterface before running Init(), so that
     // callbacks are not missed if Init sets m_synced to true.
     RegisterValidationInterface(this);
     if (!Init()) {
         FatalError("%s: %s failed to initialize", __func__, GetName());
         return;
     }
 
     m_thread_sync = std::thread(&TraceThread<std::function<void()>>, GetName(),
                                 std::bind(&BaseIndex::ThreadSync, this));
 }
 
 void BaseIndex::Stop() {
     UnregisterValidationInterface(this);
 
     if (m_thread_sync.joinable()) {
         m_thread_sync.join();
     }
 }
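
As background for the ReadBlockFromDisk() call above, here is a hedged skeleton of what a concrete index provides on top of BaseIndex, based only on the members visible in this diff (Init(), WriteBlock(), GetDB() and the GetName() string used in the log messages). The access specifiers and override set are assumptions; TxIndex in the next file is the real in-tree example:

    // Hedged, illustrative skeleton -- not part of this diff.
    class ExampleIndex final : public BaseIndex {
        std::unique_ptr<BaseIndex::DB> m_db;

    protected:
        bool Init() override { return BaseIndex::Init(); }

        // Called for every block by ThreadSync() and BlockConnected(); the
        // index persists whatever it derives from the block here.
        bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override {
            return true;
        }

        BaseIndex::DB &GetDB() const override { return *m_db; }

        const char *GetName() const override { return "exampleindex"; }
    };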
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index fc0b012a2..0af436f1e 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -1,297 +1,298 @@
 // Copyright (c) 2017-2018 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <index/txindex.h>
 
+#include <blockdb.h>
 #include <chain.h>
 #include <shutdown.h>
 #include <ui_interface.h>
 #include <util/system.h>
 #include <util/translation.h>
 #include <validation.h>
 
 #include <boost/thread.hpp>
 
 constexpr char DB_BEST_BLOCK = 'B';
 constexpr char DB_TXINDEX = 't';
 constexpr char DB_TXINDEX_BLOCK = 'T';
 
 std::unique_ptr<TxIndex> g_txindex;
 
 struct CDiskTxPos : public FlatFilePos {
     unsigned int nTxOffset; // after header
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITEAS(FlatFilePos, *this);
         READWRITE(VARINT(nTxOffset));
     }
 
     CDiskTxPos(const FlatFilePos &blockIn, unsigned int nTxOffsetIn)
         : FlatFilePos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {}
 
     CDiskTxPos() { SetNull(); }
 
     void SetNull() {
         FlatFilePos::SetNull();
         nTxOffset = 0;
     }
 };
 
 /**
  * Access to the txindex database (indexes/txindex/)
  *
  * The database stores a block locator of the chain the database is synced to
  * so that the TxIndex can efficiently determine the point it last stopped at.
  * A locator is used instead of a simple hash of the chain tip because blocks
  * and block index entries may not be flushed to disk until after this database
  * is updated.
  */
 class TxIndex::DB : public BaseIndex::DB {
 public:
     explicit DB(size_t n_cache_size, bool f_memory = false,
                 bool f_wipe = false);
 
     /// Read the disk location of the transaction data with the given ID.
     /// Returns false if the transaction ID is not indexed.
     bool ReadTxPos(const TxId &txid, CDiskTxPos &pos) const;
 
     /// Write a batch of transaction positions to the DB.
     bool WriteTxs(const std::vector<std::pair<TxId, CDiskTxPos>> &v_pos);
 
     /// Migrate txindex data from the block tree DB, where it may be for older
     /// nodes that have not been upgraded yet to the new database.
     bool MigrateData(CBlockTreeDB &block_tree_db,
                      const CBlockLocator &best_locator);
 };
 
 TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe)
     : BaseIndex::DB(GetDataDir() / "indexes" / "txindex", n_cache_size,
                     f_memory, f_wipe) {}
 
 bool TxIndex::DB::ReadTxPos(const TxId &txid, CDiskTxPos &pos) const {
     return Read(std::make_pair(DB_TXINDEX, txid), pos);
 }
 
 bool TxIndex::DB::WriteTxs(
     const std::vector<std::pair<TxId, CDiskTxPos>> &v_pos) {
     CDBBatch batch(*this);
     for (const auto &tuple : v_pos) {
         batch.Write(std::make_pair(DB_TXINDEX, tuple.first), tuple.second);
     }
     return WriteBatch(batch);
 }
 
 /*
  * Safely persist a transfer of data from the old txindex database to the new
  * one, and compact the range of keys updated. This is used internally by
  * MigrateData.
  */
 static void
 WriteTxIndexMigrationBatches(CDBWrapper &newdb, CDBWrapper &olddb,
                              CDBBatch &batch_newdb, CDBBatch &batch_olddb,
                              const std::pair<uint8_t, uint256> &begin_key,
                              const std::pair<uint8_t, uint256> &end_key) {
     // Sync new DB changes to disk before deleting from old DB.
     newdb.WriteBatch(batch_newdb, /*fSync=*/true);
     olddb.WriteBatch(batch_olddb);
     olddb.CompactRange(begin_key, end_key);
 
     batch_newdb.Clear();
     batch_olddb.Clear();
 }
 
 bool TxIndex::DB::MigrateData(CBlockTreeDB &block_tree_db,
                               const CBlockLocator &best_locator) {
     // The prior implementation of txindex was always in sync with the block index
     // and presence was indicated with a boolean DB flag. If the flag is set,
     // this means the txindex from a previous version is valid and in sync with
     // the chain tip. The first step of the migration is to unset the flag and
     // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
     // index entries are copied over in batches to the new database. Finally,
     // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
     // written to the new database.
     //
     // Unsetting the boolean flag ensures that if the node is downgraded to a
     // previous version, it will not see a corrupted, partially migrated index
     // -- it will see that the txindex is disabled. When the node is upgraded
     // again, the migration will pick up where it left off and sync to the block
     // with hash DB_TXINDEX_BLOCK.
     bool f_legacy_flag = false;
     block_tree_db.ReadFlag("txindex", f_legacy_flag);
     if (f_legacy_flag) {
         if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
             return error("%s: cannot write block indicator", __func__);
         }
         if (!block_tree_db.WriteFlag("txindex", false)) {
             return error("%s: cannot write block index db flag", __func__);
         }
     }
 
     CBlockLocator locator;
     if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
         return true;
     }
 
     int64_t count = 0;
     uiInterface.InitMessage(_("Upgrading txindex database").translated);
     LogPrintf("Upgrading txindex database... [0%%]\n");
     uiInterface.ShowProgress(_("Upgrading txindex database").translated, 0,
                              true);
     int report_done = 0;
     const size_t batch_size = 1 << 24; // 16 MiB
 
     CDBBatch batch_newdb(*this);
     CDBBatch batch_olddb(block_tree_db);
 
     std::pair<uint8_t, uint256> key;
     std::pair<uint8_t, uint256> begin_key{DB_TXINDEX, uint256()};
     std::pair<uint8_t, uint256> prev_key = begin_key;
 
     bool interrupted = false;
     std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
     for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
         boost::this_thread::interruption_point();
         if (ShutdownRequested()) {
             interrupted = true;
             break;
         }
 
         if (!cursor->GetKey(key)) {
             return error("%s: cannot get key from valid cursor", __func__);
         }
         if (key.first != DB_TXINDEX) {
             break;
         }
 
         // Log progress every 10%.
         if (++count % 256 == 0) {
             // Since txids are uniformly random and traversed in increasing
             // order, the high 16 bits of the ID can be used to estimate the
             // current progress.
             const uint256 &txid = key.second;
             uint32_t high_nibble =
                 (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
                 (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
             int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
 
             uiInterface.ShowProgress(_("Upgrading txindex database").translated,
                                      percentage_done, true);
             if (report_done < percentage_done / 10) {
                 LogPrintf("Upgrading txindex database... [%d%%]\n",
                           percentage_done);
                 report_done = percentage_done / 10;
             }
         }
 
         CDiskTxPos value;
         if (!cursor->GetValue(value)) {
             return error("%s: cannot parse txindex record", __func__);
         }
         batch_newdb.Write(key, value);
         batch_olddb.Erase(key);
 
         if (batch_newdb.SizeEstimate() > batch_size ||
             batch_olddb.SizeEstimate() > batch_size) {
             // NOTE: it's OK to delete the key pointed at by the current DB
             // cursor while iterating because LevelDB iterators are guaranteed
             // to provide a consistent view of the underlying data, like a
             // lightweight snapshot.
             WriteTxIndexMigrationBatches(*this, block_tree_db, batch_newdb,
                                          batch_olddb, prev_key, key);
             prev_key = key;
         }
     }
 
     // If these final DB batches complete the migration, write the best block
     // hash marker to the new database and delete from the old one. This signals
     // that the former is fully caught up to that point in the blockchain and
     // that all txindex entries have been removed from the latter.
     if (!interrupted) {
         batch_olddb.Erase(DB_TXINDEX_BLOCK);
         batch_newdb.Write(DB_BEST_BLOCK, locator);
     }
 
     WriteTxIndexMigrationBatches(*this, block_tree_db, batch_newdb, batch_olddb,
                                  begin_key, key);
 
     if (interrupted) {
         LogPrintf("[CANCELLED].\n");
         return false;
     }
 
     uiInterface.ShowProgress("", 100, false);
 
     LogPrintf("[DONE].\n");
     return true;
 }
 
 TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe)
     : m_db(std::make_unique<TxIndex::DB>(n_cache_size, f_memory, f_wipe)) {}
 
 TxIndex::~TxIndex() {}
 
 bool TxIndex::Init() {
     LOCK(cs_main);
 
     // Attempt to migrate txindex from the old database to the new one. Even if
     // chain_tip is null, the node could be reindexing and we still want to
     // delete txindex records in the old database.
     if (!m_db->MigrateData(*pblocktree, ::ChainActive().GetLocator())) {
         return false;
     }
 
     return BaseIndex::Init();
 }
 
 bool TxIndex::WriteBlock(const CBlock &block, const CBlockIndex *pindex) {
     // Exclude genesis block transaction because outputs are not spendable.
     if (pindex->nHeight == 0) {
         return true;
     }
 
     CDiskTxPos pos(pindex->GetBlockPos(),
                    GetSizeOfCompactSize(block.vtx.size()));
     std::vector<std::pair<TxId, CDiskTxPos>> vPos;
     vPos.reserve(block.vtx.size());
     for (const auto &tx : block.vtx) {
         vPos.emplace_back(tx->GetId(), pos);
         pos.nTxOffset += ::GetSerializeSize(*tx, CLIENT_VERSION);
     }
     return m_db->WriteTxs(vPos);
 }
 
 BaseIndex::DB &TxIndex::GetDB() const {
     return *m_db;
 }
 
 bool TxIndex::FindTx(const TxId &txid, BlockHash &block_hash,
                      CTransactionRef &tx) const {
     CDiskTxPos postx;
     if (!m_db->ReadTxPos(txid, postx)) {
         return false;
     }
 
     CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
     if (file.IsNull()) {
         return error("%s: OpenBlockFile failed", __func__);
     }
     CBlockHeader header;
     try {
         file >> header;
         if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) {
             return error("%s: fseek(...) failed", __func__);
         }
         file >> tx;
     } catch (const std::exception &e) {
         return error("%s: Deserialize or I/O error - %s", __func__, e.what());
     }
     if (tx->GetId() != txid) {
         return error("%s: txid mismatch", __func__);
     }
     block_hash = header.GetHash();
     return true;
 }
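
Finally, a hedged sketch of the lookup path this file exposes, in the shape an RPC-style caller typically uses it. g_txindex, BlockUntilSyncedToCurrentChain() and FindTx() are the members shown above; LookupIndexedTx and the abbreviated includes are illustrative:

    // Hedged, illustrative caller -- not part of this diff.
    #include <index/txindex.h>
    #include <primitives/transaction.h>

    static CTransactionRef LookupIndexedTx(const TxId &txid,
                                           BlockHash &hashBlock) {
        if (!g_txindex) {
            // The node is running without -txindex.
            return nullptr;
        }
        // Let the index catch up with all connected blocks so a recently
        // confirmed transaction is not reported as missing.
        g_txindex->BlockUntilSyncedToCurrentChain();

        CTransactionRef tx;
        if (!g_txindex->FindTx(txid, hashBlock, tx)) {
            return nullptr;
        }
        return tx;
    }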
diff --git a/src/init.cpp b/src/init.cpp
index 5bd3e144b..b1c6663f8 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,2798 +1,2799 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2018 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <init.h>
 
 #include <addrman.h>
 #include <amount.h>
 #include <avalanche/processor.h>
 #include <banman.h>
+#include <blockdb.h>
 #include <blockfilter.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <checkpoints.h>
 #include <coins.h>
 #include <compat/sanity.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <flatfile.h>
 #include <fs.h>
 #include <httprpc.h>
 #include <httpserver.h>
 #include <index/blockfilterindex.h>
 #include <index/txindex.h>
 #include <interfaces/chain.h>
 #include <key.h>
 #include <miner.h>
 #include <net.h>
 #include <net_permissions.h>
 #include <net_processing.h>
 #include <netbase.h>
 #include <node/context.h>
 #include <policy/mempool.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <rpc/blockchain.h>
 #include <rpc/register.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <scheduler.h>
 #include <script/scriptcache.h>
 #include <script/sigcache.h>
 #include <script/standard.h>
 #include <shutdown.h>
 #include <timedata.h>
 #include <torcontrol.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <ui_interface.h>
 #include <util/moneystr.h>
 #include <util/threadnames.h>
 #include <util/translation.h>
 #include <util/validation.h>
 #include <validation.h>
 #include <validationinterface.h>
 #include <walletinitinterface.h>
 
 #include <boost/algorithm/string/classification.hpp>
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/algorithm/string/split.hpp>
 #include <boost/thread.hpp>
 
 #if ENABLE_ZMQ
 #include <zmq/zmqabstractnotifier.h>
 #include <zmq/zmqnotificationinterface.h>
 #include <zmq/zmqrpc.h>
 #endif
 
 #ifndef WIN32
 #include <attributes.h>
 #include <cerrno>
 #include <csignal>
 #include <sys/stat.h>
 #endif
 #include <cstdint>
 #include <cstdio>
 
 static const bool DEFAULT_PROXYRANDOMIZE = true;
 static const bool DEFAULT_REST_ENABLE = false;
 static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
 
 #ifdef WIN32
 // Win32 LevelDB doesn't use file descriptors, and the ones used for accessing
 // block files don't count towards the fd_set size limit anyway.
 #define MIN_CORE_FILEDESCRIPTORS 0
 #else
 #define MIN_CORE_FILEDESCRIPTORS 150
 #endif
 
 /**
  * The PID file facilities.
  */
 static const char *BITCOIN_PID_FILENAME = "bitcoind.pid";
 
 static fs::path GetPidFile() {
     return AbsPathForConfigVal(
         fs::path(gArgs.GetArg("-pid", BITCOIN_PID_FILENAME)));
 }
 
 NODISCARD static bool CreatePidFile() {
     fsbridge::ofstream file{GetPidFile()};
     if (file) {
 #ifdef WIN32
         tfm::format(file, "%d\n", GetCurrentProcessId());
 #else
         tfm::format(file, "%d\n", getpid());
 #endif
         return true;
     } else {
         return InitError(
             strprintf(_("Unable to create the PID file '%s': %s").translated,
                       GetPidFile().string(), std::strerror(errno)));
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Shutdown
 //
 
 //
 // Thread management and startup/shutdown:
 //
 // The network-processing threads are all part of a thread group created by
 // AppInit() or the Qt main() function.
 //
 // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets
 // fRequestShutdown, which makes the main thread's WaitForShutdown() interrupt
 // the thread group.
 // WaitForShutdown() then makes all other ongoing threads in the thread group
 // join the main thread.
 // Shutdown() is then called to clean up database connections, and stop other
 // threads that should only be stopped after the main network-processing threads
 // have exited.
 //
 // Shutdown for Qt is very similar, only it uses a QTimer to detect
 // ShutdownRequested() getting set, and then does the normal Qt shutdown thing.
 //
 
 static std::unique_ptr<CCoinsViewErrorCatcher> pcoinscatcher;
 static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle;
 
 static boost::thread_group threadGroup;
 
 void Interrupt(NodeContext &node) {
     InterruptHTTPServer();
     InterruptHTTPRPC();
     InterruptRPC();
     InterruptREST();
     InterruptTorControl();
     InterruptMapPort();
     if (g_avalanche) {
         // Avalanche needs to be stopped before we interrupt the thread group as
         // the scheduler will stop working then.
         g_avalanche->stopEventLoop();
     }
     if (node.connman) {
         node.connman->Interrupt();
     }
     if (g_txindex) {
         g_txindex->Interrupt();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Interrupt(); });
 }
 
 void Shutdown(NodeContext &node) {
     LogPrintf("%s: In progress...\n", __func__);
     static RecursiveMutex cs_Shutdown;
     TRY_LOCK(cs_Shutdown, lockShutdown);
     if (!lockShutdown) {
         return;
     }
 
     /// Note: Shutdown() must be able to handle cases in which initialization
     /// failed part of the way, for example if the data directory was found to
     /// be locked. Be sure that anything that writes files or flushes caches
     /// only does this if the respective module was initialized.
     util::ThreadRename("shutoff");
     g_mempool.AddTransactionsUpdated(1);
 
     StopHTTPRPC();
     StopREST();
     StopRPC();
     StopHTTPServer();
     for (const auto &client : node.chain_clients) {
         client->flush();
     }
     StopMapPort();
 
     // Because avalanche and the network depend on each other, it is important
     // to shut them down in this order:
     // 1. Stop avalanche event loop.
     // 2. Shutdown network processing.
     // 3. Destroy avalanche::Processor.
     // 4. Destroy CConnman
     if (g_avalanche) {
         g_avalanche->stopEventLoop();
     }
 
     // Because these depend on each other, we make sure that neither can be
     // using the other before destroying them.
     if (node.peer_logic) {
         UnregisterValidationInterface(node.peer_logic.get());
     }
     if (node.connman) {
         node.connman->Stop();
     }
     if (g_txindex) {
         g_txindex->Stop();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Stop(); });
 
     StopTorControl();
 
     // After everything has been shut down, but before things get flushed, stop
     // the CScheduler/checkqueue threadGroup
     if (node.scheduler) {
         node.scheduler->stop();
     }
     threadGroup.interrupt_all();
     threadGroup.join_all();
 
     // After the threads that potentially access these pointers have been
     // stopped, destruct and reset all to nullptr.
     node.peer_logic.reset();
 
     // Destroy various global instances
     g_avalanche.reset();
     node.connman.reset();
     node.banman.reset();
     g_txindex.reset();
     DestroyAllBlockFilterIndexes();
 
     if (::g_mempool.IsLoaded() &&
         gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         DumpMempool(::g_mempool);
     }
 
     // FlushStateToDisk generates a ChainStateFlushed callback, which we should
     // avoid missing
     if (pcoinsTip != nullptr) {
         ::ChainstateActive().ForceFlushStateToDisk();
     }
 
     // After there are no more peers/RPC left to give us new data which may
     // generate CValidationInterface callbacks, flush them...
     GetMainSignals().FlushBackgroundCallbacks();
 
     // Any future callbacks will be dropped. This should absolutely be safe - if
     // missing a callback results in an unrecoverable situation, unclean
     // shutdown would too. The only reason to do the above flushes is to let the
     // wallet catch up with our current chain to avoid any strange pruning edge
     // cases and make next startup faster by avoiding rescan.
 
     {
         LOCK(cs_main);
         if (pcoinsTip != nullptr) {
             ::ChainstateActive().ForceFlushStateToDisk();
         }
         pcoinsTip.reset();
         pcoinscatcher.reset();
         pcoinsdbview.reset();
         pblocktree.reset();
     }
     for (const auto &client : node.chain_clients) {
         client->stop();
     }
 
 #if ENABLE_ZMQ
     if (g_zmq_notification_interface) {
         UnregisterValidationInterface(g_zmq_notification_interface);
         delete g_zmq_notification_interface;
         g_zmq_notification_interface = nullptr;
     }
 #endif
 
     try {
         if (!fs::remove(GetPidFile())) {
             LogPrintf("%s: Unable to remove PID file: File does not exist\n",
                       __func__);
         }
     } catch (const fs::filesystem_error &e) {
         LogPrintf("%s: Unable to remove PID file: %s\n", __func__,
                   fsbridge::get_filesystem_error_message(e));
     }
     node.chain_clients.clear();
     UnregisterAllValidationInterfaces();
     GetMainSignals().UnregisterBackgroundSignalScheduler();
     globalVerifyHandle.reset();
     ECC_Stop();
     if (node.mempool) {
         node.mempool = nullptr;
     }
     node.scheduler.reset();
     LogPrintf("%s: done\n", __func__);
 }
 
 /**
  * Signal handlers are very limited in what they are allowed to do.
  * The execution context the handler is invoked in is not guaranteed,
  * so we restrict handler operations to just touching variables:
  */
 #ifndef WIN32
 static void HandleSIGTERM(int) {
     StartShutdown();
 }
 
 static void HandleSIGHUP(int) {
     LogInstance().m_reopen_file = true;
 }
 #else
 static BOOL WINAPI consoleCtrlHandler(DWORD dwCtrlType) {
     StartShutdown();
     Sleep(INFINITE);
     return true;
 }
 #endif
 
 #ifndef WIN32
 static void registerSignalHandler(int signal, void (*handler)(int)) {
     struct sigaction sa;
     sa.sa_handler = handler;
     sigemptyset(&sa.sa_mask);
     sa.sa_flags = 0;
     sigaction(signal, &sa, NULL);
 }
 #endif
 
 static boost::signals2::connection rpc_notify_block_change_connection;
 static void OnRPCStarted() {
     rpc_notify_block_change_connection =
         uiInterface.NotifyBlockTip_connect(&RPCNotifyBlockChange);
 }
 
 static void OnRPCStopped() {
     rpc_notify_block_change_connection.disconnect();
     RPCNotifyBlockChange(false, nullptr);
     g_best_block_cv.notify_all();
     LogPrint(BCLog::RPC, "RPC stopped.\n");
 }
 
 void SetupServerArgs() {
     const auto defaultBaseParams =
         CreateBaseChainParams(CBaseChainParams::MAIN);
     const auto testnetBaseParams =
         CreateBaseChainParams(CBaseChainParams::TESTNET);
     const auto regtestBaseParams =
         CreateBaseChainParams(CBaseChainParams::REGTEST);
     const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN);
     const auto testnetChainParams =
         CreateChainParams(CBaseChainParams::TESTNET);
     const auto regtestChainParams =
         CreateChainParams(CBaseChainParams::REGTEST);
 
     // Hidden Options
     std::vector<std::string> hidden_args = {
         "-h", "-help", "-dbcrashratio", "-forcecompactdb", "-parkdeepreorg",
         "-automaticunparking", "-replayprotectionactivationtime",
         "-enableminerfund",
         // GUI args. These will be overwritten by SetupUIArgs for the GUI
         "-allowselfsignedrootcertificates", "-choosedatadir", "-lang=<lang>",
         "-min", "-resetguisettings", "-rootcertificates=<file>", "-splash",
         "-uiplatform",
         // TODO remove after the November 2020 upgrade
         "-axionactivationtime"};
 
     // Set all of the args and their help
     // When adding new options to the categories, please keep and ensure
     // alphabetical ordering. Do not use _(...) to translate -help-debug
     // options: they contain many technical terms and reach only a very small
     // audience, so translating them is unnecessary stress for translators.
     gArgs.AddArg("-?", "Print this help message and exit",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY,
                  OptionsCategory::OPTIONS);
 #if defined(HAVE_SYSTEM)
     gArgs.AddArg("-alertnotify=<cmd>",
                  "Execute command when a relevant alert is received or we see "
                  "a really long fork (%s in cmd is replaced by message)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #endif
     gArgs.AddArg(
         "-assumevalid=<hex>",
         strprintf(
             "If this block is in the chain assume that it and its ancestors "
             "are valid and potentially skip their script verification (0 to "
             "verify all, default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(),
             testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blocksdir=<dir>",
                  "Specify directory to hold blocks subdirectory for *.dat "
                  "files (default: <datadir>)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #if defined(HAVE_SYSTEM)
     gArgs.AddArg("-blocknotify=<cmd>",
                  "Execute command when the best block changes (%s in cmd is "
                  "replaced by block hash)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #endif
     gArgs.AddArg("-blockreconstructionextratxn=<n>",
                  strprintf("Extra transactions to keep in memory for compact "
                            "block reconstructions (default: %u)",
                            DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-blocksonly",
         strprintf(
             "Whether to reject transactions from network peers. Transactions "
             "from the wallet or RPC are not affected. (default: %u)",
             DEFAULT_BLOCKSONLY),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-conf=<file>",
                  strprintf("Specify configuration file. Relative paths will be "
                            "prefixed by datadir location. (default: %s)",
                            BITCOIN_CONF_FILENAME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-datadir=<dir>", "Specify data directory",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-dbbatchsize",
         strprintf("Maximum database write batch size in bytes (default: %u)",
                   nDefaultDbBatchSize),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-dbcache=<n>",
         strprintf("Set database cache size in MiB (%d to %d, default: %d)",
                   nMinDbCache, nMaxDbCache, nDefaultDbCache),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-debuglogfile=<file>",
                  strprintf("Specify location of debug log file. Relative paths "
                            "will be prefixed by a net-specific datadir "
                            "location. (0 to disable; default: %s)",
                            DEFAULT_DEBUGLOGFILE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-feefilter",
                  strprintf("Tell other nodes to filter invs to us by our "
                            "mempool min fee (default: %d)",
                            DEFAULT_FEEFILTER),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::OPTIONS);
     gArgs.AddArg("-finalizationdelay=<n>",
                  strprintf("Set the minimum amount of time to wait between a "
                            "block header reception and the block finalization. "
                            "Unit is seconds (default: %d)",
                            DEFAULT_MIN_FINALIZATION_DELAY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-includeconf=<file>",
         "Specify additional configuration file, relative to the -datadir path "
         "(only useable from configuration file, not command line)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxreorgdepth=<n>",
                  strprintf("Configure at what depth blocks are considered "
                            "final (default: %d). Use -1 to disable.",
                            DEFAULT_MAX_REORG_DEPTH),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-loadblock=<file>",
                  "Imports blocks from external blk000??.dat file on startup",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxmempool=<n>",
                  strprintf("Keep the transaction memory pool below <n> "
                            "megabytes (default: %u)",
                            DEFAULT_MAX_MEMPOOL_SIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxorphantx=<n>",
                  strprintf("Keep at most <n> unconnectable transactions in "
                            "memory (default: %u)",
                            DEFAULT_MAX_ORPHAN_TRANSACTIONS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-mempoolexpiry=<n>",
                  strprintf("Do not keep transactions in the mempool longer "
                            "than <n> hours (default: %u)",
                            DEFAULT_MEMPOOL_EXPIRY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-minimumchainwork=<hex>",
         strprintf(
             "Minimum work assumed to exist on a valid chain in hex "
             "(default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(),
             testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-par=<n>",
         strprintf("Set the number of script verification threads (%u to %d, 0 "
                   "= auto, <0 = leave that many cores free, default: %d)",
                   -GetNumCores(), MAX_SCRIPTCHECK_THREADS,
                   DEFAULT_SCRIPTCHECK_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-persistmempool",
                  strprintf("Whether to save the mempool on shutdown and load "
                            "on restart (default: %u)",
                            DEFAULT_PERSIST_MEMPOOL),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-pid=<file>",
                  strprintf("Specify pid file. Relative paths will be prefixed "
                            "by a net-specific datadir location. (default: %s)",
                            BITCOIN_PID_FILENAME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-prune=<n>",
         strprintf("Reduce storage requirements by enabling pruning (deleting) "
                   "of old blocks. This allows the pruneblockchain RPC to be "
                   "called to delete specific blocks, and enables automatic "
                   "pruning of old blocks if a target size in MiB is provided. "
                   "This mode is incompatible with -txindex and -rescan. "
                   "Warning: Reverting this setting requires re-downloading the "
                   "entire blockchain. (default: 0 = disable pruning blocks, 1 "
                   "= allow manual pruning via RPC, >=%u = automatically prune "
                   "block files to stay under the specified target size in MiB)",
                   MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-reindex-chainstate",
                  "Rebuild chain state from the currently indexed blocks. When "
                  "in pruning mode or if blocks on disk might be corrupted, use "
                  "full -reindex instead.",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-reindex",
         "Rebuild chain state and block index from the blk*.dat files on disk",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #ifndef WIN32
     gArgs.AddArg(
         "-sysperms",
         "Create new files with system default permissions, instead of umask "
         "077 (only effective with disabled wallet functionality)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-sysperms");
 #endif
     gArgs.AddArg("-txindex",
                  strprintf("Maintain a full transaction index, used by the "
                            "getrawtransaction rpc call (default: %d)",
                            DEFAULT_TXINDEX),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blockfilterindex=<type>",
                  strprintf("Maintain an index of compact filters by block "
                            "(default: %s, values: %s).",
                            DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
                      " If <type> is not supplied or if <type> = 1, indexes for "
                      "all known types are enabled.",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-usecashaddr",
                  "Use Cash Address for destination encoding instead of base58 "
                  "(activate by default on Jan, 14)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 
     gArgs.AddArg("-addnode=<ip>",
                  "Add a node to connect to and attempt to keep the connection "
                  "open (see the `addnode` RPC command help for more info)",
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg("-banscore=<n>",
                  strprintf("Threshold for disconnecting and discouraging "
                            "misbehaving peers (default: %u)",
                            DEFAULT_BANSCORE_THRESHOLD),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-bantime=<n>",
                  strprintf("Default duration (in seconds) of manually "
                            "configured bans (default: %u)",
                            DEFAULT_MISBEHAVING_BANTIME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-bind=<addr>",
                  "Bind to given address and always listen on it. Use "
                  "[host]:port notation for IPv6",
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-connect=<ip>",
         "Connect only to the specified node(s); -connect=0 disables automatic "
         "connections (the rules for this peer are the same as for -addnode)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CONNECTION);
     gArgs.AddArg("-discover",
                  "Discover own IP addresses (default: 1 when listening and no "
                  "-externalip or -proxy)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-dns",
                  strprintf("Allow DNS lookups for -addnode, -seednode and "
                            "-connect (default: %d)",
                            DEFAULT_NAME_LOOKUP),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-dnsseed",
                  "Query for peer addresses via DNS lookup, if low on addresses "
                  "(default: 1 unless -connect used)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-enablebip61",
                  strprintf("Send reject messages per BIP61 (default: %u)",
                            DEFAULT_ENABLE_BIP61),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 
     gArgs.AddArg("-externalip=<ip>", "Specify your own public address",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-forcednsseed",
         strprintf(
             "Always query for peer addresses via DNS lookup (default: %d)",
             DEFAULT_FORCEDNSSEED),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-listen",
         "Accept connections from outside (default: 1 if no -proxy or -connect)",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-listenonion",
         strprintf("Automatically create Tor hidden service (default: %d)",
                   DEFAULT_LISTEN_ONION),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxconnections=<n>",
         strprintf("Maintain at most <n> connections to peers (default: %u)",
                   DEFAULT_MAX_PEER_CONNECTIONS),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-maxreceivebuffer=<n>",
                  strprintf("Maximum per-connection receive buffer, <n>*1000 "
                            "bytes (default: %u)",
                            DEFAULT_MAXRECEIVEBUFFER),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxsendbuffer=<n>",
         strprintf(
             "Maximum per-connection send buffer, <n>*1000 bytes (default: %u)",
             DEFAULT_MAXSENDBUFFER),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxtimeadjustment",
         strprintf("Maximum allowed median peer time offset adjustment. Local "
                   "perspective of time may be influenced by peers forward or "
                   "backward by this amount. (default: %u seconds)",
                   DEFAULT_MAX_TIME_ADJUSTMENT),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-onion=<ip:port>",
                  strprintf("Use separate SOCKS5 proxy to reach peers via Tor "
                            "hidden services (default: %s)",
                            "-proxy"),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-onlynet=<net>",
         "Make outgoing connections only through network <net> (ipv4, ipv6 or "
         "onion). Incoming connections are not affected by this option. This "
         "option can be specified multiple times to allow multiple networks.",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-peerbloomfilters",
                  strprintf("Support filtering of blocks and transaction with "
                            "bloom filters (default: %d)",
                            DEFAULT_PEERBLOOMFILTERS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-permitbaremultisig",
                  strprintf("Relay non-P2SH multisig (default: %d)",
                            DEFAULT_PERMIT_BAREMULTISIG),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-port=<port>",
                  strprintf("Listen for connections on <port> (default: %u, "
                            "testnet: %u, regtest: %u)",
                            defaultChainParams->GetDefaultPort(),
                            testnetChainParams->GetDefaultPort(),
                            regtestChainParams->GetDefaultPort()),
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-proxyrandomize",
                  strprintf("Randomize credentials for every proxy connection. "
                            "This enables Tor stream isolation (default: %d)",
                            DEFAULT_PROXYRANDOMIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-seednode=<ip>",
                  "Connect to a node to retrieve peer addresses, and disconnect",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-timeout=<n>",
                  strprintf("Specify connection timeout in milliseconds "
                            "(minimum: 1, default: %d)",
                            DEFAULT_CONNECT_TIMEOUT),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-peertimeout=<n>",
         strprintf("Specify p2p connection timeout in seconds. This option "
                   "determines the amount of time a peer may be inactive before "
                   "the connection to it is dropped. (minimum: 1, default: %d)",
                   DEFAULT_PEER_CONNECT_TIMEOUT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-torcontrol=<ip>:<port>",
         strprintf(
             "Tor control port to use if onion listening enabled (default: %s)",
             DEFAULT_TOR_CONTROL),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-torpassword=<pass>",
                  "Tor control port password (default: empty)",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::CONNECTION);
 #ifdef USE_UPNP
 #if USE_UPNP
     gArgs.AddArg("-upnp",
                  "Use UPnP to map the listening port (default: 1 when "
                  "listening and no -proxy)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #else
     gArgs.AddArg(
         "-upnp",
         strprintf("Use UPnP to map the listening port (default: %u)", 0),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #endif
 #else
     hidden_args.emplace_back("-upnp");
 #endif
     gArgs.AddArg("-whitebind=<addr>",
                  "Bind to given address and whitelist peers connecting to it. "
                  "Use [host]:port notation for IPv6",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-whitelist=<IP address or network>",
                  "Whitelist peers connecting from the given IP address (e.g. "
                  "1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be "
                  "specified multiple times. "
                  "Whitelisted peers cannot be DoS banned and their "
                  "transactions are always relayed, even if they are already in "
                  "the mempool, useful e.g. for a gateway",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxuploadtarget=<n>",
         strprintf("Tries to keep outbound traffic under the given target (in "
                   "MiB per 24h), 0 = no limit (default: %d)",
                   DEFAULT_MAX_UPLOAD_TARGET),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 
     g_wallet_init_interface.AddWalletOptions();
 
 #if ENABLE_ZMQ
     gArgs.AddArg("-zmqpubhashblock=<address>",
                  "Enable publish hash block in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubhashtx=<address>",
                  "Enable publish hash transaction in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawblock=<address>",
                  "Enable publish raw block in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawtx=<address>",
                  "Enable publish raw transaction in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubhashblockhwm=<n>",
                  strprintf("Set publish hash block outbound message high water "
                            "mark (default: %d)",
                            CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
                 ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubhashtxhwm=<n>",
                  strprintf("Set publish hash transaction outbound message high "
                            "water mark (default: %d)",
                            CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
                 ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawblockhwm=<n>",
                  strprintf("Set publish raw block outbound message high water "
                            "mark (default: %d)",
                            CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
                 ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawtxhwm=<n>",
                  strprintf("Set publish raw transaction outbound message high "
                            "water mark (default: %d)",
                            CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
                 ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
 #else
     hidden_args.emplace_back("-zmqpubhashblock=<address>");
     hidden_args.emplace_back("-zmqpubhashtx=<address>");
     hidden_args.emplace_back("-zmqpubrawblock=<address>");
     hidden_args.emplace_back("-zmqpubrawtx=<address>");
     hidden_args.emplace_back("-zmqpubhashblockhwm=<n>");
     hidden_args.emplace_back("-zmqpubhashtxhwm=<n>");
     hidden_args.emplace_back("-zmqpubrawblockhwm=<n>");
     hidden_args.emplace_back("-zmqpubrawtxhwm=<n>");
 #endif
 
     gArgs.AddArg(
         "-checkblocks=<n>",
         strprintf("How many blocks to check at startup (default: %u, 0 = all)",
                   DEFAULT_CHECKBLOCKS),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checklevel=<n>",
         strprintf("How thorough the block verification of "
                   "-checkblocks is: "
                   "level 0 reads the blocks from disk, "
                   "level 1 verifies block validity, "
                   "level 2 verifies undo data, "
                   "level 3 checks disconnection of tip blocks, "
                   "and level 4 tries to reconnect the blocks. "
                   "Each level includes the checks of the previous levels "
                   "(0-4, default: %u)",
                   DEFAULT_CHECKLEVEL),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checkblockindex",
         strprintf("Do a full consistency check for the block tree, "
                   "setBlockIndexCandidates, ::ChainActive() and "
                   "mapBlocksUnlinked occasionally. (default: %u, regtest: %u)",
                   defaultChainParams->DefaultConsistencyChecks(),
                   regtestChainParams->DefaultConsistencyChecks()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checkmempool=<n>",
         strprintf(
             "Run checks every <n> transactions (default: %u, regtest: %u)",
             defaultChainParams->DefaultConsistencyChecks(),
             regtestChainParams->DefaultConsistencyChecks()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-checkpoints",
                  strprintf("Only accept block chain matching built-in "
                            "checkpoints (default: %d)",
                            DEFAULT_CHECKPOINTS_ENABLED),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-deprecatedrpc=<method>",
                  "Allows deprecated RPC method(s) to be used",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-dropmessagestest=<n>",
                  "Randomly drop 1 of every <n> network messages",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-stopafterblockimport",
         strprintf("Stop running after importing blocks from disk (default: %d)",
                   DEFAULT_STOPAFTERBLOCKIMPORT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-stopatheight",
                  strprintf("Stop running after reaching the given height in "
                            "the main chain (default: %u)",
                            DEFAULT_STOPATHEIGHT),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-limitancestorcount=<n>",
                  strprintf("Do not accept transactions if number of in-mempool "
                            "ancestors is <n> or more (default: %u)",
                            DEFAULT_ANCESTOR_LIMIT),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitancestorsize=<n>",
         strprintf("Do not accept transactions whose size with all in-mempool "
                   "ancestors exceeds <n> kilobytes (default: %u)",
                   DEFAULT_ANCESTOR_SIZE_LIMIT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitdescendantcount=<n>",
         strprintf("Do not accept transactions if any ancestor would have <n> "
                   "or more in-mempool descendants (default: %u)",
                   DEFAULT_DESCENDANT_LIMIT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitdescendantsize=<n>",
         strprintf("Do not accept transactions if any ancestor would have more "
                   "than <n> kilobytes of in-mempool descendants (default: %u).",
                   DEFAULT_DESCENDANT_SIZE_LIMIT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-addrmantest", "Allows to test address relay on localhost",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg("-debug=<category>",
                  strprintf("Output debugging information (default: %u, "
                            "supplying <category> is optional)",
                            0) +
                      ". " +
                      "If <category> is not supplied or if <category> = 1, "
                      "output all debugging information."
                      "<category> can be: " +
                      ListLogCategories() + ".",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-debugexclude=<category>",
         strprintf("Exclude debugging information for a category. Can be used "
                   "in conjunction with -debug=1 to output debug logs for all "
                   "categories except one or more specified categories."),
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-help-debug",
                  "Print help message with debugging options and exit",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-logips",
                  strprintf("Include IP addresses in debug output (default: %d)",
                            DEFAULT_LOGIPS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-logtimestamps",
                  strprintf("Prepend debug output with timestamp (default: %d)",
                            DEFAULT_LOGTIMESTAMPS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-logthreadnames",
         strprintf(
             "Prepend debug output with name of the originating thread (only "
             "available on platforms supporting thread_local) (default: %u)",
             DEFAULT_LOGTHREADNAMES),
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-logtimemicros",
         strprintf("Add microsecond precision to debug timestamps (default: %d)",
                   DEFAULT_LOGTIMEMICROS),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-mocktime=<n>",
                  "Replace actual time with " + UNIX_EPOCH_TIME +
                      " (default: 0)",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-maxsigcachesize=<n>",
         strprintf("Limit size of signature cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SIG_CACHE_SIZE),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-maxscriptcachesize=<n>",
         strprintf("Limit size of script cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SCRIPT_CACHE_SIZE),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-maxtipage=<n>",
                  strprintf("Maximum tip age in seconds to consider node in "
                            "initial block download (default: %u)",
                            DEFAULT_MAX_TIP_AGE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg(
         "-printtoconsole",
         "Send trace/debug info to console instead of debug.log file (default: "
         "1 when no -daemon. To disable logging to file, set debuglogfile=0)",
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-printpriority",
                  strprintf("Log transaction priority and fee per kB when "
                            "mining blocks (default: %d)",
                            DEFAULT_PRINTPRIORITY),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-shrinkdebugfile",
         "Shrink debug.log file on client startup (default: 1 when no -debug)",
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
 
     SetupChainParamsBaseOptions();
 
     gArgs.AddArg(
         "-acceptnonstdtxn",
         strprintf(
             "Relay and mine \"non-standard\" transactions (%sdefault: %u)",
             "testnet/regtest only; ", defaultChainParams->RequireStandard()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
     gArgs.AddArg("-excessiveblocksize=<n>",
                  strprintf("Do not accept blocks larger than this limit, in "
                            "bytes (default: %d)",
                            DEFAULT_MAX_BLOCK_SIZE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-dustrelayfee=<amt>",
         strprintf("Fee rate (in %s/kB) used to defined dust, the value of an "
                   "output such that it will cost about 1/3 of its value in "
                   "fees at this fee rate to spend it. (default: %s)",
                   CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
 
     gArgs.AddArg("-bytespersigop",
                  strprintf("Equivalent bytes per sigop in transactions for "
                            "relay and mining (default: %u)",
                            DEFAULT_BYTES_PER_SIGOP),
                  ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-datacarrier",
         strprintf("Relay and mine data carrier transactions (default: %d)",
                   DEFAULT_ACCEPT_DATACARRIER),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg("-datacarriersize",
                  strprintf("Maximum size of data in data carrier transactions "
                            "we relay and mine (default: %u)",
                            MAX_OP_RETURN_RELAY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-minrelaytxfee=<amt>",
         strprintf("Fees (in %s/kB) smaller than this are rejected for "
                   "relaying, mining and transaction creation (default: %s)",
                   CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE_PER_KB)),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-whitelistrelay",
         strprintf("Accept relayed transactions received from whitelisted "
                   "peers even when not relaying transactions (default: %d)",
                   DEFAULT_WHITELISTRELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-whitelistforcerelay",
         strprintf("Force relay of transactions from whitelisted peers even if "
                   "they violate local relay policy (default: %d)",
                   DEFAULT_WHITELISTFORCERELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
 
     // Not sure this really belongs here, but it will do for now.
     // FIXME: This doesn't work anyways.
     gArgs.AddArg("-excessutxocharge=<amt>",
                  strprintf("Fees (in %s/kB) to charge per utxo created for "
                            "relaying, and mining (default: %s)",
                            CURRENCY_UNIT, FormatMoney(DEFAULT_UTXO_FEE)),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::NODE_RELAY);
 
     gArgs.AddArg("-blockmaxsize=<n>",
                  strprintf("Set maximum block size in bytes (default: %d)",
                            DEFAULT_MAX_GENERATED_BLOCK_SIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
     gArgs.AddArg("-blockmintxfee=<amt>",
                  strprintf("Set lowest fee rate (in %s/kB) for transactions to "
                            "be included in block creation. (default: %s)",
                            CURRENCY_UNIT,
                            FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB)),
                  ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
 
     gArgs.AddArg("-blockversion=<n>",
                  "Override block version to test forking scenarios",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::BLOCK_CREATION);
 
     gArgs.AddArg("-server", "Accept command line and JSON-RPC commands",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg("-rest",
                  strprintf("Accept public REST requests (default: %d)",
                            DEFAULT_REST_ENABLE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcbind=<addr>[:port]",
         "Bind to given address to listen for JSON-RPC connections. Do not "
         "expose the RPC server to untrusted networks such as the public "
         "internet! This option is ignored unless -rpcallowip is also passed. "
         "Port is optional and overrides -rpcport.  Use [host]:port notation "
         "for IPv6. This option can be specified multiple times (default: "
         "127.0.0.1 and ::1 i.e., localhost)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY |
             ArgsManager::SENSITIVE,
         OptionsCategory::RPC);
     gArgs.AddArg("-rpccookiefile=<loc>",
                  "Location of the auth cookie. Relative paths will be prefixed "
                  "by a net-specific datadir location. (default: data dir)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcwhitelist=<whitelist>",
         "Set a whitelist to filter incoming RPC calls for a specific user. The "
         "field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc "
         "2>,...,<rpc n>. If multiple whitelists are set for a given user, they "
         "are set-intersected. See -rpcwhitelistdefault documentation for "
         "information on default whitelist behavior.",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcwhitelistdefault",
         "Sets default behavior for rpc whitelisting. Unless "
         "rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc "
         "server acts as if all rpc users are subject to "
         "empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault "
         "is set to 1 and no -rpcwhitelist is set, rpc server acts as if all "
         "rpc users are subject to empty whitelists.",
         ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcauth=<userpw>",
         "Username and hashed password for JSON-RPC connections. The field "
         "<userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical "
         "python script is included in share/rpcauth. The client then connects "
         "normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of "
         "arguments. This option can be specified multiple times",
         ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
     gArgs.AddArg("-rpcport=<port>",
                  strprintf("Listen for JSON-RPC connections on <port> "
                            "(default: %u, testnet: %u, regtest: %u)",
                            defaultBaseParams->RPCPort(),
                            testnetBaseParams->RPCPort(),
                            regtestBaseParams->RPCPort()),
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcallowip=<ip>",
                  "Allow JSON-RPC connections from specified source. Valid for "
                  "<ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. "
                  "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). "
                  "This option can be specified multiple times",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcthreads=<n>",
         strprintf(
             "Set the number of threads to service RPC calls (default: %d)",
             DEFAULT_HTTP_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpccorsdomain=value",
         "Domain from which to accept cross origin requests (browser enforced)",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
 
     gArgs.AddArg("-rpcworkqueue=<n>",
                  strprintf("Set the depth of the work queue to service RPC "
                            "calls (default: %d)",
                            DEFAULT_HTTP_WORKQUEUE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcservertimeout=<n>",
                  strprintf("Timeout during HTTP requests (default: %d)",
                            DEFAULT_HTTP_SERVER_TIMEOUT),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::RPC);
 
 #if HAVE_DECL_DAEMON
     gArgs.AddArg("-daemon",
                  "Run in the background as a daemon and accept commands",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-daemon");
 #endif
 
     // Avalanche options.
     gArgs.AddArg(
         "-enableavalanche",
         strprintf("Enable avalanche (default: %u)", AVALANCHE_DEFAULT_ENABLED),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     gArgs.AddArg(
         "-avacooldown",
         strprintf("Mandatory cooldown between two avapoll (default: %u)",
                   AVALANCHE_DEFAULT_COOLDOWN),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
 
     // Add the hidden options
     gArgs.AddHiddenArgs(hidden_args);
 }
 
 std::string LicenseInfo() {
     const std::string URL_SOURCE_CODE =
         "<https://github.com/Bitcoin-ABC/bitcoin-abc>";
     const std::string URL_WEBSITE = "<https://www.bitcoinabc.org>";
 
     return CopyrightHolders(strprintf(_("Copyright (C) %i-%i").translated, 2009,
                                       COPYRIGHT_YEAR) +
                             " ") +
            "\n" + "\n" +
            strprintf(_("Please contribute if you find %s useful. "
                        "Visit %s for further information about the software.")
                          .translated,
                      PACKAGE_NAME, URL_WEBSITE) +
            "\n" +
            strprintf(_("The source code is available from %s.").translated,
                      URL_SOURCE_CODE) +
            "\n" + "\n" + _("This is experimental software.").translated + "\n" +
            strprintf(_("Distributed under the MIT software license, see the "
                        "accompanying file %s or %s")
                          .translated,
                      "COPYING", "<https://opensource.org/licenses/MIT>") +
            "\n" + "\n" +
            strprintf(_("This product includes software developed by the "
                        "OpenSSL Project for use in the OpenSSL Toolkit %s and "
                        "cryptographic software written by Eric Young and UPnP "
                        "software written by Thomas Bernard.")
                          .translated,
                      "<https://www.openssl.org>") +
            "\n";
 }
 
 #if defined(HAVE_SYSTEM)
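 // Run the -blocknotify command (with %s replaced by the new tip hash)
 // whenever the best block changes, once initial block download is over.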
 static void BlockNotifyCallback(bool initialSync,
                                 const CBlockIndex *pBlockIndex) {
     if (initialSync || !pBlockIndex) {
         return;
     }
 
     std::string strCmd = gArgs.GetArg("-blocknotify", "");
     if (!strCmd.empty()) {
         boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
         std::thread t(runCommand, strCmd);
         // thread runs free
         t.detach();
     }
 }
 #endif
 
 static bool fHaveGenesis = false;
 static Mutex g_genesis_wait_mutex;
 static std::condition_variable g_genesis_wait_cv;
 
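 // Notify g_genesis_wait_cv once a block has been connected, so that init
 // can wait for the genesis block to be available before proceeding.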
 static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) {
     if (pBlockIndex != nullptr) {
         {
             LOCK(g_genesis_wait_mutex);
             fHaveGenesis = true;
         }
         g_genesis_wait_cv.notify_all();
     }
 }
 
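 // RAII helper that marks the node as importing (fImporting) for the
 // lifetime of the object.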
 struct CImportingNow {
     CImportingNow() {
         assert(fImporting == false);
         fImporting = true;
     }
 
     ~CImportingNow() {
         assert(fImporting == true);
         fImporting = false;
     }
 };
 
 // If we're using -prune with -reindex, then delete block files that will be
 // ignored by the reindex.  Since reindexing works by starting at block file 0
 // and looping until a blockfile is missing, do the same here to delete any
 // later block files after a gap. Also delete all rev files since they'll be
 // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync
 // with what's actually on disk by the time we start downloading, so that
 // pruning works correctly.
 static void CleanupBlockRevFiles() {
     std::map<std::string, fs::path> mapBlockFiles;
 
     // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
     // Remove the rev files immediately and insert the blk file paths into an
     // ordered map keyed by block file index.
     LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for "
               "-reindex with -prune\n");
     const auto directoryIterator = fs::directory_iterator{GetBlocksDir()};
     for (const auto &file : directoryIterator) {
         const auto fileName = file.path().filename().string();
         if (fs::is_regular_file(file) && fileName.length() == 12 &&
             fileName.substr(8, 4) == ".dat") {
             if (fileName.substr(0, 3) == "blk") {
                 mapBlockFiles[fileName.substr(3, 5)] = file.path();
             } else if (fileName.substr(0, 3) == "rev") {
                 remove(file.path());
             }
         }
     }
 
     // Remove all block files that aren't part of a contiguous set starting
     // at zero, by walking the ordered map (keys are block file indices)
     // while keeping a separate counter. Once we hit a gap (or if 0 doesn't
     // exist), start removing block files.
     int contiguousCounter = 0;
     for (const auto &item : mapBlockFiles) {
         if (atoi(item.first) == contiguousCounter) {
             contiguousCounter++;
             continue;
         }
         remove(item.second);
     }
 }
 
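 // Import thread: handles -reindex, imports bootstrap.dat and any
 // -loadblock files, then activates the best known chain.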
 static void ThreadImport(const Config &config,
                          std::vector<fs::path> vImportFiles) {
     util::ThreadRename("loadblk");
     ScheduleBatchPriority();
 
     {
         const CChainParams &chainParams = config.GetChainParams();
 
         CImportingNow imp;
 
         // -reindex
         if (fReindex) {
             int nFile = 0;
             while (true) {
                 FlatFilePos pos(nFile, 0);
                 if (!fs::exists(GetBlockPosFilename(pos))) {
                     // No block files left to reindex
                     break;
                 }
                 FILE *file = OpenBlockFile(pos, true);
                 if (!file) {
                     // This error is logged in OpenBlockFile
                     break;
                 }
                 LogPrintf("Reindexing block file blk%05u.dat...\n",
                           (unsigned int)nFile);
                 LoadExternalBlockFile(config, file, &pos);
                 nFile++;
             }
             pblocktree->WriteReindexing(false);
             fReindex = false;
             LogPrintf("Reindexing finished\n");
             // To avoid ending up in a situation without genesis block, re-try
             // initializing (no-op if reindexing worked):
             LoadGenesisBlock(chainParams);
         }
 
         // hardcoded $DATADIR/bootstrap.dat
         fs::path pathBootstrap = GetDataDir() / "bootstrap.dat";
         if (fs::exists(pathBootstrap)) {
             FILE *file = fsbridge::fopen(pathBootstrap, "rb");
             if (file) {
                 fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
                 LogPrintf("Importing bootstrap.dat...\n");
                 LoadExternalBlockFile(config, file);
                 RenameOver(pathBootstrap, pathBootstrapOld);
             } else {
                 LogPrintf("Warning: Could not open bootstrap file %s\n",
                           pathBootstrap.string());
             }
         }
 
         // -loadblock=
         for (const fs::path &path : vImportFiles) {
             FILE *file = fsbridge::fopen(path, "rb");
             if (file) {
                 LogPrintf("Importing blocks file %s...\n", path.string());
                 LoadExternalBlockFile(config, file);
             } else {
                 LogPrintf("Warning: Could not open blocks file %s\n",
                           path.string());
             }
         }
 
         // Reconsider blocks we know are valid. They may have been marked
         // invalid by, for instance, running an outdated version of the node
         // software.
         const MapCheckpoints &checkpoints =
             chainParams.Checkpoints().mapCheckpoints;
         for (const MapCheckpoints::value_type &i : checkpoints) {
             const BlockHash &hash = i.second;
 
             LOCK(cs_main);
             CBlockIndex *pblockindex = LookupBlockIndex(hash);
             if (pblockindex && !pblockindex->nStatus.isValid()) {
                 LogPrintf("Reconsidering checkpointed block %s ...\n",
                           hash.GetHex());
                 ResetBlockFailureFlags(pblockindex);
             }
         }
 
         // Scan for better chains in the block chain database that are not
         // yet connected in the active best chain.
         BlockValidationState state;
         if (!ActivateBestChain(config, state)) {
             LogPrintf("Failed to connect best block (%s)\n",
                       FormatStateMessage(state));
             StartShutdown();
             return;
         }
 
         if (gArgs.GetBoolArg("-stopafterblockimport",
                              DEFAULT_STOPAFTERBLOCKIMPORT)) {
             LogPrintf("Stopping after block import\n");
             StartShutdown();
             return;
         }
     } // End scope of CImportingNow
     if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         LoadMempool(config, ::g_mempool);
     }
     ::g_mempool.SetIsLoaded(!ShutdownRequested());
 }
 
 /** Sanity checks
  *  Ensure that Bitcoin is running in a usable environment with all
  *  necessary library support.
  */
 static bool InitSanityCheck() {
     if (!ECC_InitSanityCheck()) {
         InitError(
             "Elliptic curve cryptography sanity check failure. Aborting.");
         return false;
     }
 
     if (!glibcxx_sanity_test()) {
         return false;
     }
 
     if (!Random_SanityCheck()) {
         InitError("OS cryptographic RNG sanity check failure. Aborting.");
         return false;
     }
 
     return true;
 }
 
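 // Start the HTTP server along with the JSON-RPC interface and, if enabled,
 // the REST interface.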
 static bool AppInitServers(Config &config,
                            HTTPRPCRequestProcessor &httpRPCRequestProcessor) {
     RPCServerSignals::OnStarted(&OnRPCStarted);
     RPCServerSignals::OnStopped(&OnRPCStopped);
     if (!InitHTTPServer(config)) {
         return false;
     }
 
     StartRPC();
 
     if (!StartHTTPRPC(httpRPCRequestProcessor)) {
         return false;
     }
     if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) {
         StartREST();
     }
 
     StartHTTPServer();
     return true;
 }
 
 // Parameter interaction based on rules
 void InitParameterInteraction() {
     // when specifying an explicit binding address, you want to listen on it
     // even when -connect or -proxy is specified.
     if (gArgs.IsArgSet("-bind")) {
         if (gArgs.SoftSetBoolArg("-listen", true)) {
             LogPrintf(
                 "%s: parameter interaction: -bind set -> setting -listen=1\n",
                 __func__);
         }
     }
     if (gArgs.IsArgSet("-whitebind")) {
         if (gArgs.SoftSetBoolArg("-listen", true)) {
             LogPrintf("%s: parameter interaction: -whitebind set -> setting "
                       "-listen=1\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-connect")) {
         // when only connecting to trusted nodes, do not seed via DNS, or listen
         // by default.
         if (gArgs.SoftSetBoolArg("-dnsseed", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-dnsseed=0\n",
                       __func__);
         }
         if (gArgs.SoftSetBoolArg("-listen", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-listen=0\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-proxy")) {
         // to protect privacy, do not listen by default if a default proxy
         // server is specified.
         if (gArgs.SoftSetBoolArg("-listen", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -listen=0\n",
                 __func__);
         }
         // to protect privacy, do not use UPNP when a proxy is set. The user may
         // still specify -listen=1 to listen locally, so don't rely on this
         // happening through -listen below.
         if (gArgs.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -upnp=0\n",
                 __func__);
         }
         // to protect privacy, do not discover addresses by default
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -proxy set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         // do not map ports or try to retrieve public IP when not listening
         // (pointless)
         if (gArgs.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -upnp=0\n",
                 __func__);
         }
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -discover=0\n",
                 __func__);
         }
         if (gArgs.SoftSetBoolArg("-listenonion", false)) {
             LogPrintf("%s: parameter interaction: -listen=0 -> setting "
                       "-listenonion=0\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-externalip")) {
         // if an explicit public IP is specified, do not try to find others
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -externalip set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     // disable whitelistrelay in blocksonly mode
     if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
         if (gArgs.SoftSetBoolArg("-whitelistrelay", false)) {
             LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting "
                       "-whitelistrelay=0\n",
                       __func__);
         }
     }
 
     // Forcing relay from whitelisted hosts implies we will accept relays from
     // them in the first place.
     if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
         if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) {
             LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> "
                       "setting -whitelistrelay=1\n",
                       __func__);
         }
     }
 }
 
 /**
  * Initialize global loggers.
  *
  * Note that this is called very early in the process lifetime, so you should be
  * careful about what global state you rely on here.
  */
 void InitLogging() {
     LogInstance().m_print_to_file = !gArgs.IsArgNegated("-debuglogfile");
     LogInstance().m_file_path = AbsPathForConfigVal(
         gArgs.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
 
     LogInstance().m_print_to_console = gArgs.GetBoolArg(
         "-printtoconsole", !gArgs.GetBoolArg("-daemon", false));
     LogInstance().m_log_timestamps =
         gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
     LogInstance().m_log_time_micros =
         gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
     LogInstance().m_log_threadnames =
         gArgs.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES);
 
     fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);
 
     std::string version_string = FormatFullVersion();
 #ifdef DEBUG
     version_string += " (debug build)";
 #else
     version_string += " (release build)";
 #endif
     LogPrintf("%s version %s\n", CLIENT_NAME, version_string);
 }
 
 namespace { // Variables internal to initialization process only
 
 int nMaxConnections;
 int nUserMaxConnections;
 int nFD;
 ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
 int64_t peer_connect_timeout;
 std::vector<BlockFilterType> g_enabled_filter_types;
 
 } // namespace
 
 [[noreturn]] static void new_handler_terminate() {
     // Rather than throwing std::bad_alloc if allocation fails, terminate
     // immediately to (try to) avoid chain corruption. Since LogPrintf may
     // itself allocate memory, set the handler directly to terminate first.
     std::set_new_handler(std::terminate);
     LogPrintf("Error: Out of memory. Terminating.\n");
 
     // The log was successful, terminate now.
     std::terminate();
 }
 
 bool AppInitBasicSetup() {
 // Step 1: setup
 #ifdef _MSC_VER
     // Turn off Microsoft heap dump noise
     _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
     _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr,
                                              OPEN_EXISTING, 0, 0));
     // Disable confusing "helpful" text message on abort, Ctrl-C
     _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
 #endif
 #ifdef WIN32
     // Enable Data Execution Prevention (DEP)
     SetProcessDEPPolicy(PROCESS_DEP_ENABLE);
 #endif
 
     if (!SetupNetworking()) {
         return InitError("Initializing networking failed");
     }
 
 #ifndef WIN32
     if (!gArgs.GetBoolArg("-sysperms", false)) {
         umask(077);
     }
 
     // Clean shutdown on SIGTERM
     registerSignalHandler(SIGTERM, HandleSIGTERM);
     registerSignalHandler(SIGINT, HandleSIGTERM);
 
     // Reopen debug.log on SIGHUP
     registerSignalHandler(SIGHUP, HandleSIGHUP);
 
     // Ignore SIGPIPE, otherwise it will bring the daemon down if the client
     // closes unexpectedly
     signal(SIGPIPE, SIG_IGN);
 #else
     SetConsoleCtrlHandler(consoleCtrlHandler, true);
 #endif
 
     std::set_new_handler(new_handler_terminate);
 
     return true;
 }
 
 bool AppInitParameterInteraction(Config &config) {
     const CChainParams &chainparams = config.GetChainParams();
     // Step 2: parameter interactions
 
     // also see: InitParameterInteraction()
 
     // Error out if network-specific options (-addnode, -connect, etc.) are
     // specified in the default section of the config file but not overridden
     // on the command line or in this network's section of the config file.
     std::string network = gArgs.GetChainName();
     for (const auto &arg : gArgs.GetUnsuitableSectionOnlyArgs()) {
         return InitError(strprintf(_("Config setting for %s only applied on %s "
                                      "network when in [%s] section.")
                                        .translated,
                                    arg, network, network));
     }
 
     // Warn if unrecognized section names are present in the config file.
     for (const auto &section : gArgs.GetUnrecognizedSections()) {
         InitWarning(strprintf(
             "%s:%i " + _("Section [%s] is not recognized.").translated,
             section.m_file, section.m_line, section.m_name));
     }
 
     if (!fs::is_directory(GetBlocksDir())) {
         return InitError(strprintf(
             _("Specified blocks directory \"%s\" does not exist.").translated,
             gArgs.GetArg("-blocksdir", "")));
     }
 
     // parse and validate enabled filter types
     std::string blockfilterindex_value =
         gArgs.GetArg("-blockfilterindex", DEFAULT_BLOCKFILTERINDEX);
     if (blockfilterindex_value == "" || blockfilterindex_value == "1") {
         g_enabled_filter_types = AllBlockFilterTypes();
     } else if (blockfilterindex_value != "0") {
         const std::vector<std::string> names =
             gArgs.GetArgs("-blockfilterindex");
         g_enabled_filter_types.reserve(names.size());
         for (const auto &name : names) {
             BlockFilterType filter_type;
             if (!BlockFilterTypeByName(name, filter_type)) {
                 return InitError(strprintf(
                     _("Unknown -blockfilterindex value %s.").translated, name));
             }
             g_enabled_filter_types.push_back(filter_type);
         }
     }
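     // Illustrative examples: a bare "-blockfilterindex" (or "=1") enables all
     // known filter types, "-blockfilterindex=<name>" (e.g. "basic") enables
     // just that one type, and "-blockfilterindex=0" leaves every filter index
     // disabled.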
 
     // if using block pruning, then disallow txindex
     if (gArgs.GetArg("-prune", 0)) {
         if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
             return InitError(
                 _("Prune mode is incompatible with -txindex.").translated);
         }
         if (!g_enabled_filter_types.empty()) {
             return InitError(
                 _("Prune mode is incompatible with -blockfilterindex.")
                     .translated);
         }
     }
 
     // -bind and -whitebind can't be set when not listening
     size_t nUserBind =
         gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
     if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         return InitError(
             "Cannot set -bind or -whitebind together with -listen=0");
     }
 
     // Make sure enough file descriptors are available
     int nBind = std::max(nUserBind, size_t(1));
     nUserMaxConnections =
         gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
     nMaxConnections = std::max(nUserMaxConnections, 0);
 
     // Trim requested connection counts to fit into system limitations
     // <int> in std::min<int>(...) to work around FreeBSD compilation issue
     // described in #2695
     nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS +
                                    MAX_ADDNODE_CONNECTIONS);
 #ifdef USE_POLL
     int fd_max = nFD;
 #else
     int fd_max = FD_SETSIZE;
 #endif
     nMaxConnections =
         std::max(std::min<int>(nMaxConnections, fd_max - nBind -
                                                     MIN_CORE_FILEDESCRIPTORS -
                                                     MAX_ADDNODE_CONNECTIONS),
                  0);
     if (nFD < MIN_CORE_FILEDESCRIPTORS) {
         return InitError(
             _("Not enough file descriptors available.").translated);
     }
     nMaxConnections =
         std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS,
                  nMaxConnections);
 
     if (nMaxConnections < nUserMaxConnections) {
         InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, "
                                 "because of system limitations.")
                                   .translated,
                               nUserMaxConnections, nMaxConnections));
     }
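     // Net effect: nMaxConnections is capped by the available descriptors
     // (fd_max and nFD) minus the bind sockets, the reserved core descriptors
     // and the addnode slots, and never drops below zero; oversized
     // -maxconnections values are clamped and reported by the warning above.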
 
     // Step 3: parameter-to-internal-flags
     if (gArgs.IsArgSet("-debug")) {
         // Special-case: if -debug=0/-nodebug is set, turn off debugging
         // messages
         const std::vector<std::string> &categories = gArgs.GetArgs("-debug");
         if (std::none_of(
                 categories.begin(), categories.end(),
                 [](std::string cat) { return cat == "0" || cat == "none"; })) {
             for (const auto &cat : categories) {
                 if (!LogInstance().EnableCategory(cat)) {
                     InitWarning(strprintf(
                         _("Unsupported logging category %s=%s.").translated,
                         "-debug", cat));
                 }
             }
         }
     }
 
     // Now remove the logging categories which were explicitly excluded
     for (const std::string &cat : gArgs.GetArgs("-debugexclude")) {
         if (!LogInstance().DisableCategory(cat)) {
             InitWarning(
                 strprintf(_("Unsupported logging category %s=%s.").translated,
                           "-debugexclude", cat));
         }
     }
 
     // Checkmempool and checkblockindex default to true in regtest mode
     int ratio = std::min<int>(
         std::max<int>(
             gArgs.GetArg("-checkmempool",
                          chainparams.DefaultConsistencyChecks() ? 1 : 0),
             0),
         1000000);
     if (ratio != 0) {
         g_mempool.setSanityCheck(1.0 / ratio);
     }
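     // e.g. -checkmempool=1 runs the expensive mempool consistency checks on
     // every update, while -checkmempool=100 runs them with probability 1/100.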
     fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex",
                                         chainparams.DefaultConsistencyChecks());
     fCheckpointsEnabled =
         gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
     if (fCheckpointsEnabled) {
         LogPrintf("Checkpoints will be verified.\n");
     } else {
         LogPrintf("Skipping checkpoint verification.\n");
     }
 
     hashAssumeValid = BlockHash::fromHex(
         gArgs.GetArg("-assumevalid",
                      chainparams.GetConsensus().defaultAssumeValid.GetHex()));
     if (!hashAssumeValid.IsNull()) {
         LogPrintf("Assuming ancestors of block %s have valid signatures.\n",
                   hashAssumeValid.GetHex());
     } else {
         LogPrintf("Validating signatures for all blocks.\n");
     }
 
     if (gArgs.IsArgSet("-minimumchainwork")) {
         const std::string minChainWorkStr =
             gArgs.GetArg("-minimumchainwork", "");
         if (!IsHexNumber(minChainWorkStr)) {
             return InitError(strprintf(
                 "Invalid non-hex (%s) minimum chain work value specified",
                 minChainWorkStr));
         }
         nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr));
     } else {
         nMinimumChainWork =
             UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
     }
     LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex());
     if (nMinimumChainWork <
         UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
         LogPrintf("Warning: nMinimumChainWork set below default value of %s\n",
                   chainparams.GetConsensus().nMinimumChainWork.GetHex());
     }
 
     // mempool limits
     int64_t nMempoolSizeMax =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
     int64_t nMempoolSizeMin =
         gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) *
         1000 * 40;
     if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) {
         return InitError(
             strprintf(_("-maxmempool must be at least %d MB").translated,
                       std::ceil(nMempoolSizeMin / 1000000.0)));
     }
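     // The floor works out to 40x the descendant size limit: e.g. a
     // hypothetical -limitdescendantsize=100 (kB) requires -maxmempool to be
     // at least 4 MB.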
 
     // Configure excessive block size.
     const uint64_t nProposedExcessiveBlockSize =
         gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE);
     if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) {
         return InitError(
             _("Excessive block size must be > 1,000,000 bytes (1MB)")
                 .translated);
     }
 
     // Check blockmaxsize does not exceed maximum accepted block size.
     const uint64_t nProposedMaxGeneratedBlockSize =
         gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE);
     if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) {
         auto msg = _("Max generated block size (blockmaxsize) cannot exceed "
                      "the excessive block size (excessiveblocksize)")
                        .translated;
         return InitError(msg);
     }
 
     // block pruning; get the amount of disk space (in MiB) to allot for block &
     // undo files
     int64_t nPruneArg = gArgs.GetArg("-prune", 0);
     if (nPruneArg < 0) {
         return InitError(
             _("Prune cannot be configured with a negative value.").translated);
     }
     nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024;
     if (nPruneArg == 1) {
         // manual pruning: -prune=1
         LogPrintf("Block pruning enabled.  Use RPC call "
                   "pruneblockchain(height) to manually prune block and undo "
                   "files.\n");
         nPruneTarget = std::numeric_limits<uint64_t>::max();
         fPruneMode = true;
     } else if (nPruneTarget) {
         if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
             return InitError(
                 strprintf(_("Prune configured below the minimum of %d MiB. "
                             "Please use a higher number.")
                               .translated,
                           MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
         }
         LogPrintf("Prune configured to target %u MiB on disk for block and "
                   "undo files.\n",
                   nPruneTarget / 1024 / 1024);
         fPruneMode = true;
     }
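     // e.g. -prune=1 keeps every block until the pruneblockchain RPC is called
     // manually, while -prune=5000 targets roughly 5000 MiB of block and undo
     // files; any other non-zero value below MIN_DISK_SPACE_FOR_BLOCK_FILES is
     // rejected above.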
 
     nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
     if (nConnectTimeout <= 0) {
         nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
     }
 
     peer_connect_timeout =
         gArgs.GetArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
     if (peer_connect_timeout <= 0) {
         return InitError(
             "peertimeout must be a positive integer.");
     }
 
     // Obtain the amount to charge for excess UTXOs
     if (gArgs.IsArgSet("-excessutxocharge")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-excessutxocharge", ""), n);
         if (!parsed || Amount::zero() > n) {
             return InitError(AmountErrMsg("excessutxocharge",
                                           gArgs.GetArg("-excessutxocharge", ""))
                                  .translated);
         }
         config.SetExcessUTXOCharge(n);
     } else {
         config.SetExcessUTXOCharge(DEFAULT_UTXO_FEE);
     }
 
     if (gArgs.IsArgSet("-minrelaytxfee")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n);
         if (!parsed || n == Amount::zero()) {
             return InitError(AmountErrMsg("minrelaytxfee",
                                           gArgs.GetArg("-minrelaytxfee", ""))
                                  .translated);
         }
         // High fee check is done afterward in CWallet::CreateWalletFromFile()
         ::minRelayTxFee = CFeeRate(n);
     }
 
     // Sanity check argument for min fee for including tx in block
     // TODO: Harmonize which arguments need sanity checking and where that
     // happens.
     if (gArgs.IsArgSet("-blockmintxfee")) {
         Amount n = Amount::zero();
         if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) {
             return InitError(AmountErrMsg("blockmintxfee",
                                           gArgs.GetArg("-blockmintxfee", ""))
                                  .translated);
         }
     }
 
     // Feerate used to define dust.  Shouldn't be changed lightly as old
     // implementations may inadvertently create non-standard transactions.
     if (gArgs.IsArgSet("-dustrelayfee")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n);
         if (!parsed || Amount::zero() == n) {
             return InitError(
                 AmountErrMsg("dustrelayfee", gArgs.GetArg("-dustrelayfee", ""))
                     .translated);
         }
         dustRelayFee = CFeeRate(n);
     }
 
     fRequireStandard =
         !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
     if (!chainparams.IsTestChain() && !fRequireStandard) {
         return InitError(
             strprintf("acceptnonstdtxn is not currently supported for %s chain",
                       chainparams.NetworkIDString()));
     }
     nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);
 
     if (!g_wallet_init_interface.ParameterInteraction()) {
         return false;
     }
 
     fIsBareMultisigStd =
         gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
     fAcceptDatacarrier =
         gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
 
     // Option to start up with mocktime set (used for regression testing):
     SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
 
     if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) {
         nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
     }
 
     // Signal Bitcoin Cash support.
     // TODO: remove some time after the hardfork when no longer needed
     // to differentiate the network nodes.
     nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH);
 
     nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
 
     return true;
 }
 
 static bool LockDataDirectory(bool probeOnly) {
     // Make sure only a single Bitcoin process is using the data directory.
     fs::path datadir = GetDataDir();
     if (!DirIsWritable(datadir)) {
         return InitError(strprintf(
             _("Cannot write to data directory '%s'; check permissions.")
                 .translated,
             datadir.string()));
     }
     if (!LockDirectory(datadir, ".lock", probeOnly)) {
         return InitError(strprintf(_("Cannot obtain a lock on data directory "
                                      "%s. %s is probably already running.")
                                        .translated,
                                    datadir.string(), PACKAGE_NAME));
     }
     return true;
 }
 
 bool AppInitSanityChecks() {
     // Step 4: sanity checks
 
     // Initialize the SHA256 implementation, RNG and elliptic curve code
     std::string sha256_algo = SHA256AutoDetect();
     LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo);
     RandomInit();
     ECC_Start();
     globalVerifyHandle.reset(new ECCVerifyHandle());
 
     // Sanity check
     if (!InitSanityCheck()) {
         return InitError(strprintf(
             _("Initialization sanity check failed. %s is shutting down.")
                 .translated,
             PACKAGE_NAME));
     }
 
     // Probe the data directory lock to give an early error message, if possible
     // We cannot hold the data directory lock here, as the fork for daemon()
     // has not yet happened, and forking would invalidate the lock.
     return LockDataDirectory(true);
 }
 
 bool AppInitLockDataDirectory() {
     // After daemonization get the data directory lock again and hold on to it
     // until exit. This creates a slight window for a race condition to
     // happen; however, the race is harmless: at worst it makes us exit
     // without printing a message to the console.
     if (!LockDataDirectory(false)) {
         // Detailed error printed inside LockDataDirectory
         return false;
     }
     return true;
 }
 
 bool AppInitMain(Config &config, RPCServer &rpcServer,
                  HTTPRPCRequestProcessor &httpRPCRequestProcessor,
                  NodeContext &node) {
     // Step 4a: application initialization
     const CChainParams &chainparams = config.GetChainParams();
 
     if (!CreatePidFile()) {
         // Detailed error printed inside CreatePidFile().
         return false;
     }
 
     BCLog::Logger &logger = LogInstance();
     if (logger.m_print_to_file) {
         if (gArgs.GetBoolArg("-shrinkdebugfile",
                              logger.DefaultShrinkDebugFile())) {
             // Do this first: it loads a chunk of debug.log into memory, and
             // it needs to happen before any other debug.log printing.
             logger.ShrinkDebugFile();
         }
     }
 
     if (!logger.StartLogging()) {
         return InitError(strprintf("Could not open debug log file %s",
                                    logger.m_file_path.string()));
     }
 
     if (!logger.m_log_timestamps) {
         LogPrintf("Startup time: %s\n", FormatISO8601DateTime(GetTime()));
     }
     LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
     LogPrintf("Using data directory %s\n", GetDataDir().string());
 
     // Only log conf file usage message if conf file actually exists.
     fs::path config_file_path =
         GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME));
     if (fs::exists(config_file_path)) {
         LogPrintf("Config file: %s\n", config_file_path.string());
     } else if (gArgs.IsArgSet("-conf")) {
         // Warn if no conf file exists at path provided by user
         InitWarning(strprintf(
             _("The specified config file %s does not exist\n").translated,
             config_file_path.string()));
     } else {
         // Not categorizing as "Warning" because it's the default behavior
         LogPrintf("Config file: %s (not found, skipping)\n",
                   config_file_path.string());
     }
 
     // Log the config arguments to debug.log
     gArgs.LogArgs();
 
     LogPrintf("Using at most %i automatic connections (%i file descriptors "
               "available)\n",
               nMaxConnections, nFD);
 
     // Warn about relative -datadir path.
     if (gArgs.IsArgSet("-datadir") &&
         !fs::path(gArgs.GetArg("-datadir", "")).is_absolute()) {
         LogPrintf("Warning: relative datadir option '%s' specified, which will "
                   "be interpreted relative to the current working directory "
                   "'%s'. This is fragile, because if bitcoin is started in the "
                   "future from a different location, it will be unable to "
                   "locate the current data files. There could also be data "
                   "loss if bitcoin is started while in a temporary "
                   "directory.\n",
                   gArgs.GetArg("-datadir", ""), fs::current_path().string());
     }
 
     InitSignatureCache();
     InitScriptExecutionCache();
 
     int script_threads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
     if (script_threads <= 0) {
         // -par=0 means autodetect (number of cores - 1 script threads)
         // -par=-n means "leave n cores free" (number of cores - n - 1 script
         // threads)
         script_threads += GetNumCores();
     }
 
     // Subtract 1 because the main thread counts towards the par threads
     script_threads = std::max(script_threads - 1, 0);
 
     // Number of script-checking threads <= MAX_SCRIPTCHECK_THREADS
     script_threads = std::min(script_threads, MAX_SCRIPTCHECK_THREADS);
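     // Worked example on a hypothetical machine where GetNumCores() returns 8:
     // -par=0 -> 0 + 8 = 8, minus the main thread = 7 worker threads;
     // -par=-2 -> -2 + 8 = 6, minus the main thread = 5; -par=2 -> 2 - 1 = 1.
     // The result is always clamped to [0, MAX_SCRIPTCHECK_THREADS].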
 
     LogPrintf("Script verification uses %d additional threads\n",
               script_threads);
     if (script_threads >= 1) {
         for (int i = 0; i < script_threads; ++i) {
             threadGroup.create_thread([i]() { return ThreadScriptCheck(i); });
         }
     }
 
     assert(!node.scheduler);
     node.scheduler = std::make_unique<CScheduler>();
 
     // Start the lightweight task scheduler thread
     CScheduler::Function serviceLoop = [&node] {
         node.scheduler->serviceQueue();
     };
     threadGroup.create_thread(std::bind(&TraceThread<CScheduler::Function>,
                                         "scheduler", serviceLoop));
 
     // Gather some entropy once per minute.
     node.scheduler->scheduleEvery(
         [] {
             RandAddPeriodic();
             return true;
         },
         std::chrono::minutes{1});
 
     GetMainSignals().RegisterBackgroundSignalScheduler(*node.scheduler);
 
     // Create client interfaces for wallets that are supposed to be loaded
     // according to -wallet and -disablewallet options. This only constructs
     // the interfaces; it doesn't load wallet data. Wallets actually get loaded
     // when load() and start() interface methods are called below.
     g_wallet_init_interface.Construct(node);
 
     /**
      * Register RPC commands regardless of -server setting so they will be
      * available in the GUI RPC console even if external calls are disabled.
      */
     RegisterAllRPCCommands(config, rpcServer, tableRPC);
     for (const auto &client : node.chain_clients) {
         client->registerRpcs();
     }
     g_rpc_node = &node;
 #if ENABLE_ZMQ
     RegisterZMQRPCCommands(tableRPC);
 #endif
 
     /**
      * Start the RPC server.  It will be started in "warmup" mode and not
      * process calls yet (but it will verify that the server is there and will
      * be ready later).  Warmup mode will be completed when initialization is
      * finished.
      */
     if (gArgs.GetBoolArg("-server", false)) {
         uiInterface.InitMessage_connect(SetRPCWarmupStatus);
         if (!AppInitServers(config, httpRPCRequestProcessor)) {
             return InitError(
                 _("Unable to start HTTP server. See debug log for details.")
                     .translated);
         }
     }
 
     // Step 5: verify wallet database integrity
     for (const auto &client : node.chain_clients) {
         if (!client->verify(chainparams)) {
             return false;
         }
     }
 
     // Step 6: network initialization
 
     // Note that we absolutely cannot open any actual connections
     // until the very end ("start node") as the UTXO/block state
     // is not yet set up and may end up being set up twice if we
     // need to reindex later.
 
     assert(!node.banman);
     node.banman = std::make_unique<BanMan>(
         GetDataDir() / "banlist.dat", config.GetChainParams(), &uiInterface,
         gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
     assert(!node.connman);
     node.connman = std::make_unique<CConnman>(
         config, GetRand(std::numeric_limits<uint64_t>::max()),
         GetRand(std::numeric_limits<uint64_t>::max()));
 
     node.peer_logic.reset(new PeerLogicValidation(
         node.connman.get(), node.banman.get(), *node.scheduler,
         gArgs.GetBoolArg("-enablebip61", DEFAULT_ENABLE_BIP61)));
     RegisterValidationInterface(node.peer_logic.get());
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
     for (const std::string &cmt : gArgs.GetArgs("-uacomment")) {
         if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT)) {
             return InitError(strprintf(
                 _("User Agent comment (%s) contains unsafe characters.")
                     .translated,
                 cmt));
         }
         uacomments.push_back(cmt);
     }
     const std::string strSubVersion =
         FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
     if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
         return InitError(strprintf(
             _("Total length of network version string (%i) exceeds maximum "
               "length (%i). Reduce the number or size of uacomments.")
                 .translated,
             strSubVersion.size(), MAX_SUBVERSION_LENGTH));
     }
 
     if (gArgs.IsArgSet("-onlynet")) {
         std::set<enum Network> nets;
         for (const std::string &snet : gArgs.GetArgs("-onlynet")) {
             enum Network net = ParseNetwork(snet);
             if (net == NET_UNROUTABLE) {
                 return InitError(strprintf(
                     _("Unknown network specified in -onlynet: '%s'").translated,
                     snet));
             }
             nets.insert(net);
         }
         for (int n = 0; n < NET_MAX; n++) {
             enum Network net = (enum Network)n;
             if (!nets.count(net)) {
                 SetReachable(net, false);
             }
         }
     }
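     // e.g. "-onlynet=ipv4 -onlynet=onion" keeps only IPv4 and Tor reachable;
     // the loop above marks every network that was not listed as unreachable.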
 
     // Check whether host lookups are allowed before parsing any
     // network-related parameters
     fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
 
     bool proxyRandomize =
         gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
     // -proxy sets a proxy for all outgoing network traffic
     // -noproxy (or -proxy=0) as well as the empty string can be used to not set
     // a proxy; this is the default
     std::string proxyArg = gArgs.GetArg("-proxy", "");
     SetReachable(NET_ONION, false);
     if (proxyArg != "" && proxyArg != "0") {
         CService proxyAddr;
         if (!Lookup(proxyArg.c_str(), proxyAddr, 9050, fNameLookup)) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'").translated,
                 proxyArg));
         }
 
         proxyType addrProxy = proxyType(proxyAddr, proxyRandomize);
         if (!addrProxy.IsValid()) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'").translated,
                 proxyArg));
         }
 
         SetProxy(NET_IPV4, addrProxy);
         SetProxy(NET_IPV6, addrProxy);
         SetProxy(NET_ONION, addrProxy);
         SetNameProxy(addrProxy);
         // by default, -proxy sets onion as reachable, unless -noonion later
         SetReachable(NET_ONION, true);
     }
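     // e.g. "-proxy=127.0.0.1:9050" routes IPv4, IPv6 and onion traffic (and
     // name lookups) through that SOCKS5 proxy; the 9050 passed to Lookup()
     // above is merely the default port used when none is specified.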
 
     // -onion can be used to set only a proxy for .onion, or override normal
     // proxy for .onion addresses.
     // -noonion (or -onion=0) disables connecting to .onion entirely. An empty
     // string is used to not override the onion proxy (in which case it defaults
     // to -proxy set above, or none)
     std::string onionArg = gArgs.GetArg("-onion", "");
     if (onionArg != "") {
         if (onionArg == "0") {
             // Handle -noonion/-onion=0
             SetReachable(NET_ONION, false);
         } else {
             CService onionProxy;
             if (!Lookup(onionArg.c_str(), onionProxy, 9050, fNameLookup)) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'").translated,
                     onionArg));
             }
             proxyType addrOnion = proxyType(onionProxy, proxyRandomize);
             if (!addrOnion.IsValid()) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'").translated,
                     onionArg));
             }
             SetProxy(NET_ONION, addrOnion);
             SetReachable(NET_ONION, true);
         }
     }
 
     // see Step 2: parameter interactions for more information about these
     fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN);
     fDiscover = gArgs.GetBoolArg("-discover", true);
     g_relay_txes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
 
     for (const std::string &strAddr : gArgs.GetArgs("-externalip")) {
         CService addrLocal;
         if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) &&
             addrLocal.IsValid()) {
             AddLocal(addrLocal, LOCAL_MANUAL);
         } else {
             return InitError(ResolveErrMsg("externalip", strAddr));
         }
     }
 
 #if ENABLE_ZMQ
     g_zmq_notification_interface = CZMQNotificationInterface::Create();
 
     if (g_zmq_notification_interface) {
         RegisterValidationInterface(g_zmq_notification_interface);
     }
 #endif
     // unlimited unless -maxuploadtarget is set
     uint64_t nMaxOutboundLimit = 0;
     uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME;
 
     if (gArgs.IsArgSet("-maxuploadtarget")) {
         nMaxOutboundLimit =
             gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 *
             1024;
     }
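     // -maxuploadtarget is given in MiB per timeframe (hence the
     // * 1024 * 1024); leaving it unset keeps nMaxOutboundLimit at 0, which
     // means unlimited.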
 
     // Step 6.5: Initialize Avalanche.
     g_avalanche = std::make_unique<avalanche::Processor>(node.connman.get());
 
     // Step 7: load block chain
 
     fReindex = gArgs.GetBoolArg("-reindex", false);
     bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);
 
     // cache size calculations
     int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
     // total cache cannot be less than nMinDbCache
     nTotalCache = std::max(nTotalCache, nMinDbCache << 20);
     // total cache cannot be greater than nMaxDbCache
     nTotalCache = std::min(nTotalCache, nMaxDbCache << 20);
     int64_t nBlockTreeDBCache =
         std::min(nTotalCache / 8, nMaxBlockDBCache << 20);
     nTotalCache -= nBlockTreeDBCache;
     int64_t nTxIndexCache =
         std::min(nTotalCache / 8, gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)
                                       ? nMaxTxIndexCache << 20
                                       : 0);
     nTotalCache -= nTxIndexCache;
     int64_t filter_index_cache = 0;
     if (!g_enabled_filter_types.empty()) {
         size_t n_indexes = g_enabled_filter_types.size();
         int64_t max_cache =
             std::min(nTotalCache / 8, max_filter_index_cache << 20);
         filter_index_cache = max_cache / n_indexes;
         nTotalCache -= filter_index_cache * n_indexes;
     }
     // use 25%-50% of the remainder for disk cache
     int64_t nCoinDBCache =
         std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23));
     // cap total coins db cache
     nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20);
     nTotalCache -= nCoinDBCache;
     // the rest goes to in-memory cache
     nCoinCacheUsage = nTotalCache;
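     // Summary of the split: at each step up to 1/8 of the remaining -dbcache
     // goes to the block tree DB, the tx index (only with -txindex) and the
     // enabled block filter indexes (each slice also capped by its own
     // constant); 25%-50% of what is left then goes to the coins LevelDB cache
     // and the remainder becomes the in-memory UTXO cache (nCoinCacheUsage).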
     int64_t nMempoolSizeMax =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
     LogPrintf("Cache configuration:\n");
     LogPrintf("* Using %.1f MiB for block index database\n",
               nBlockTreeDBCache * (1.0 / 1024 / 1024));
     if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         LogPrintf("* Using %.1f MiB for transaction index database\n",
                   nTxIndexCache * (1.0 / 1024 / 1024));
     }
     for (BlockFilterType filter_type : g_enabled_filter_types) {
         LogPrintf("* Using %.1f MiB for %s block filter index database\n",
                   filter_index_cache * (1.0 / 1024 / 1024),
                   BlockFilterTypeName(filter_type));
     }
     LogPrintf("* Using %.1f MiB for chain state database\n",
               nCoinDBCache * (1.0 / 1024 / 1024));
     LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of "
               "unused mempool space)\n",
               nCoinCacheUsage * (1.0 / 1024 / 1024),
               nMempoolSizeMax * (1.0 / 1024 / 1024));
 
     bool fLoaded = false;
     while (!fLoaded && !ShutdownRequested()) {
         const bool fReset = fReindex;
         std::string strLoadError;
 
         uiInterface.InitMessage(_("Loading block index...").translated);
         do {
             const int64_t load_block_index_start_time = GetTimeMillis();
             try {
                 LOCK(cs_main);
                 UnloadBlockIndex();
                 pcoinsTip.reset();
                 pcoinsdbview.reset();
                 pcoinscatcher.reset();
                 // new CBlockTreeDB tries to delete the existing file, which
                 // fails if it's still open from the previous loop. Close it
                 // first:
                 pblocktree.reset();
                 pblocktree.reset(
                     new CBlockTreeDB(nBlockTreeDBCache, false, fReset));
 
                 if (fReset) {
                     pblocktree->WriteReindexing(true);
                     // If we're reindexing in prune mode, wipe away unusable
                     // block files and all undo data files
                     if (fPruneMode) {
                         CleanupBlockRevFiles();
                     }
                 }
 
                 if (ShutdownRequested()) {
                     break;
                 }
 
                 const Consensus::Params &params = chainparams.GetConsensus();
 
                 // LoadBlockIndex will load fHavePruned if we've ever removed a
                 // block file from disk.
                 // Note that it also sets fReindex based on the disk flag!
                 // From here on out fReindex and fReset mean something
                 // different!
                 if (!LoadBlockIndex(params)) {
                     if (ShutdownRequested()) {
                         break;
                     }
                     strLoadError = _("Error loading block database").translated;
                     break;
                 }
 
                 // If the loaded chain has a wrong genesis, bail out immediately
                 // (we're likely using a testnet datadir, or the other way
                 // around).
                 if (!::BlockIndex().empty() &&
                     !LookupBlockIndex(params.hashGenesisBlock)) {
                     return InitError(_("Incorrect or no genesis block found. "
                                        "Wrong datadir for network?")
                                          .translated);
                 }
 
                 // Check for changed -prune state.  What we are concerned about
                 // is a user who has pruned blocks in the past, but is now
                 // trying to run unpruned.
                 if (fHavePruned && !fPruneMode) {
                     strLoadError =
                         _("You need to rebuild the database using -reindex to "
                           "go back to unpruned mode.  This will redownload the "
                           "entire blockchain")
                             .translated;
                     break;
                 }
 
                 // At this point blocktree args are consistent with what's on
                 // disk. If we're not mid-reindex (based on disk + args), add a
                 // genesis block on disk (otherwise we use the one already on
                 // disk).
                 // This is called again in ThreadImport after the reindex
                 // completes.
                 if (!fReindex && !LoadGenesisBlock(chainparams)) {
                     strLoadError =
                         _("Error initializing block database").translated;
                     break;
                 }
 
                 // At this point we're either in reindex or we've loaded a
                 // useful block tree into BlockIndex()!
 
                 pcoinsdbview.reset(new CCoinsViewDB(
                     nCoinDBCache, false, fReset || fReindexChainState));
                 pcoinscatcher.reset(
                     new CCoinsViewErrorCatcher(pcoinsdbview.get()));
                 pcoinscatcher->AddReadErrCallback([]() {
                     uiInterface.ThreadSafeMessageBox(
                         _("Error reading from database, shutting down.")
                             .translated,
                         "", CClientUIInterface::MSG_ERROR);
                 });
 
                 // If necessary, upgrade from older database format.
                 // This is a no-op if we cleared the coinsviewdb with -reindex
                 // or -reindex-chainstate
                 if (!pcoinsdbview->Upgrade()) {
                     strLoadError =
                         _("Error upgrading chainstate database").translated;
                     break;
                 }
 
                 // ReplayBlocks is a no-op if we cleared the coinsviewdb with
                 // -reindex or -reindex-chainstate
                 if (!ReplayBlocks(params, pcoinsdbview.get())) {
                     strLoadError =
                         _("Unable to replay blocks. You will need to rebuild "
                           "the database using -reindex-chainstate.")
                             .translated;
                     break;
                 }
 
                 // The on-disk coinsdb is now in a good state, create the cache
                 pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get()));
 
                 bool is_coinsview_empty = fReset || fReindexChainState ||
                                           pcoinsTip->GetBestBlock().IsNull();
                 if (!is_coinsview_empty) {
                     // LoadChainTip sets ::ChainActive() based on pcoinsTip's
                     // best block
                     if (!LoadChainTip(config)) {
                         strLoadError =
                             _("Error initializing block database").translated;
                         break;
                     }
                     assert(::ChainActive().Tip() != nullptr);
 
                     uiInterface.InitMessage(
                         _("Verifying blocks...").translated);
                     if (fHavePruned &&
                         gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) >
                             MIN_BLOCKS_TO_KEEP) {
                         LogPrintf(
                             "Prune: pruned datadir may not have more than %d "
                             "blocks; only checking available blocks\n",
                             MIN_BLOCKS_TO_KEEP);
                     }
 
                     CBlockIndex *tip = ::ChainActive().Tip();
                     RPCNotifyBlockChange(true, tip);
                     if (tip && tip->nTime >
                                    GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME) {
                         strLoadError =
                             _("The block database contains a block which "
                               "appears to be from the future. This may be due "
                               "to your computer's date and time being set "
                               "incorrectly. Only rebuild the block database if "
                               "you are sure that your computer's date and time "
                               "are correct")
                                 .translated;
                         break;
                     }
 
                     if (!CVerifyDB().VerifyDB(
                             config, pcoinsdbview.get(),
                             gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
                             gArgs.GetArg("-checkblocks",
                                          DEFAULT_CHECKBLOCKS))) {
                         strLoadError =
                             _("Corrupted block database detected").translated;
                         break;
                     }
                 }
             } catch (const std::exception &e) {
                 LogPrintf("%s\n", e.what());
                 strLoadError = _("Error opening block database").translated;
                 break;
             }
 
             fLoaded = true;
             LogPrintf(" block index %15dms\n",
                       GetTimeMillis() - load_block_index_start_time);
         } while (false);
 
         if (!fLoaded && !ShutdownRequested()) {
             // first suggest a reindex
             if (!fReset) {
                 bool fRet = uiInterface.ThreadSafeQuestion(
                     strLoadError + ".\n\n" +
                         _("Do you want to rebuild the block database now?")
                             .translated,
                     strLoadError + ".\nPlease restart with -reindex or "
                                    "-reindex-chainstate to recover.",
                     "",
                     CClientUIInterface::MSG_ERROR |
                         CClientUIInterface::BTN_ABORT);
                 if (fRet) {
                     fReindex = true;
                     AbortShutdown();
                 } else {
                     LogPrintf("Aborted block database rebuild. Exiting.\n");
                     return false;
                 }
             } else {
                 return InitError(strLoadError);
             }
         }
     }
 
     // As LoadBlockIndex can take several minutes, it's possible the user
     // requested to kill the GUI during the last operation. If so, exit.
     // As the program has not fully started yet, Shutdown() is possibly
     // overkill.
     if (ShutdownRequested()) {
         LogPrintf("Shutdown requested. Exiting.\n");
         return false;
     }
 
     // Encode addresses using cashaddr instead of base58.
     // We do this by default to avoid confusion with BTC addresses.
     config.SetCashAddrEncoding(gArgs.GetBoolArg("-usecashaddr", true));
 
     // Now that the chain state is loaded, make mempool generally available in
     // the node context. For example the connection manager, wallet, or RPC
     // threads, which are all started after this, may use it from the node
     // context.
     assert(!node.mempool);
     node.mempool = &::g_mempool;
 
     // Step 8: load indexers
     if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         g_txindex = std::make_unique<TxIndex>(nTxIndexCache, false, fReindex);
         g_txindex->Start();
     }
 
     for (const auto &filter_type : g_enabled_filter_types) {
         InitBlockFilterIndex(filter_type, filter_index_cache, false, fReindex);
         GetBlockFilterIndex(filter_type)->Start();
     }
 
     // Step 9: load wallet
     for (const auto &client : node.chain_clients) {
         if (!client->load(chainparams)) {
             return false;
         }
     }
 
     // Step 10: data directory maintenance
 
     // if pruning, unset the service bit and perform the initial blockstore
     // prune after any wallet rescanning has taken place.
     if (fPruneMode) {
         LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
         nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
         if (!fReindex) {
             uiInterface.InitMessage(_("Pruning blockstore...").translated);
             ::ChainstateActive().PruneAndFlush();
         }
     }
 
     // Step 11: import blocks
     if (!CheckDiskSpace(GetDataDir())) {
         InitError(strprintf(_("Error: Disk space is low for %s").translated,
                             GetDataDir()));
         return false;
     }
     if (!CheckDiskSpace(GetBlocksDir())) {
         InitError(strprintf(_("Error: Disk space is low for %s").translated,
                             GetBlocksDir()));
         return false;
     }
 
     // Either install a handler to notify us when genesis activates, or set
     // fHaveGenesis directly.
     // No locking, as this happens before any background thread is started.
     boost::signals2::connection block_notify_genesis_wait_connection;
     if (::ChainActive().Tip() == nullptr) {
         block_notify_genesis_wait_connection =
             uiInterface.NotifyBlockTip_connect(BlockNotifyGenesisWait);
     } else {
         fHaveGenesis = true;
     }
 
 #if defined(HAVE_SYSTEM)
     if (gArgs.IsArgSet("-blocknotify")) {
         uiInterface.NotifyBlockTip_connect(BlockNotifyCallback);
     }
 #endif
 
     std::vector<fs::path> vImportFiles;
     for (const std::string &strFile : gArgs.GetArgs("-loadblock")) {
         vImportFiles.push_back(strFile);
     }
 
     threadGroup.create_thread(
         std::bind(&ThreadImport, std::ref(config), vImportFiles));
 
     // Wait for genesis block to be processed
     {
         WAIT_LOCK(g_genesis_wait_mutex, lock);
         // We previously could hang here if StartShutdown() was called before
         // ThreadImport got started, so instead we just wait on a timer to
         // check ShutdownRequested() regularly.
         while (!fHaveGenesis && !ShutdownRequested()) {
             g_genesis_wait_cv.wait_for(lock, std::chrono::milliseconds(500));
         }
         block_notify_genesis_wait_connection.disconnect();
     }
 
     if (ShutdownRequested()) {
         return false;
     }
 
     // Step 12: start node
 
     int chain_active_height;
 
     //// debug print
     {
         LOCK(cs_main);
         LogPrintf("block tree size = %u\n", ::BlockIndex().size());
         chain_active_height = ::ChainActive().Height();
     }
     LogPrintf("nBestHeight = %d\n", chain_active_height);
 
     if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
         StartTorControl();
     }
 
     Discover();
 
     // Map ports with UPnP
     if (gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)) {
         StartMapPort();
     }
 
     CConnman::Options connOptions;
     connOptions.nLocalServices = nLocalServices;
     connOptions.nMaxConnections = nMaxConnections;
     connOptions.m_max_outbound_full_relay = std::min(
         MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, connOptions.nMaxConnections);
     connOptions.m_max_outbound_block_relay = std::min(
         MAX_BLOCKS_ONLY_CONNECTIONS,
         connOptions.nMaxConnections - connOptions.m_max_outbound_full_relay);
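     // With this ordering full-relay outbound peers get priority: they receive
     // min(MAX_OUTBOUND_FULL_RELAY_CONNECTIONS, nMaxConnections) slots, and
     // block-relay-only peers take at most MAX_BLOCKS_ONLY_CONNECTIONS of the
     // remainder.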
     connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
     connOptions.nMaxFeeler = 1;
     connOptions.nBestHeight = chain_active_height;
     connOptions.uiInterface = &uiInterface;
     connOptions.m_banman = node.banman.get();
     connOptions.m_msgproc = node.peer_logic.get();
     connOptions.nSendBufferMaxSize =
         1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
     connOptions.nReceiveFloodSize =
         1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
     connOptions.m_added_nodes = gArgs.GetArgs("-addnode");
 
     connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
     connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
     connOptions.m_peer_connect_timeout = peer_connect_timeout;
 
     for (const std::string &strBind : gArgs.GetArgs("-bind")) {
         CService addrBind;
         if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
             return InitError(ResolveErrMsg("bind", strBind));
         }
         connOptions.vBinds.push_back(addrBind);
     }
 
     for (const std::string &strBind : gArgs.GetArgs("-whitebind")) {
         NetWhitebindPermissions whitebind;
         std::string error;
         if (!NetWhitebindPermissions::TryParse(strBind, whitebind, error)) {
             return InitError(error);
         }
         connOptions.vWhiteBinds.push_back(whitebind);
     }
 
     for (const auto &net : gArgs.GetArgs("-whitelist")) {
         NetWhitelistPermissions subnet;
         std::string error;
         if (!NetWhitelistPermissions::TryParse(net, subnet, error)) {
             return InitError(error);
         }
         connOptions.vWhitelistedRange.push_back(subnet);
     }
 
     connOptions.vSeedNodes = gArgs.GetArgs("-seednode");
 
     // Initiate outbound connections unless connect=0
     connOptions.m_use_addrman_outgoing = !gArgs.IsArgSet("-connect");
     if (!connOptions.m_use_addrman_outgoing) {
         const auto connect = gArgs.GetArgs("-connect");
         if (connect.size() != 1 || connect[0] != "0") {
             connOptions.m_specified_outgoing = connect;
         }
     }
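     // e.g. "-connect=0" disables automatic outbound connections entirely,
     // while one or more "-connect=<host>" entries restrict outbound
     // connections to exactly those peers (addrman-driven selection stays off
     // in both cases).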
     if (!node.connman->Start(*node.scheduler, connOptions)) {
         return false;
     }
 
     // Step 13: finished
 
     SetRPCWarmupFinished();
     uiInterface.InitMessage(_("Done loading").translated);
 
     for (const auto &client : node.chain_clients) {
         client->start(*node.scheduler);
     }
 
     BanMan *banman = node.banman.get();
     node.scheduler->scheduleEvery(
         [banman] {
             banman->DumpBanlist();
             return true;
         },
         DUMP_BANS_INTERVAL);
 
     // Start Avalanche's event loop.
     g_avalanche->startEventLoop(*node.scheduler);
 
     return true;
 }
diff --git a/src/interfaces/chain.cpp b/src/interfaces/chain.cpp
index 67ae03b1c..f60c1cfe6 100644
--- a/src/interfaces/chain.cpp
+++ b/src/interfaces/chain.cpp
@@ -1,452 +1,453 @@
 // Copyright (c) 2018-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <interfaces/chain.h>
 
+#include <blockdb.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <interfaces/handler.h>
 #include <interfaces/wallet.h>
 #include <net.h>
 #include <net_processing.h>
 #include <node/coin.h>
 #include <node/context.h>
 #include <node/transaction.h>
 #include <policy/mempool.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <primitives/block.h>
 #include <primitives/blockhash.h>
 #include <rpc/protocol.h>
 #include <rpc/server.h>
 #include <shutdown.h>
 #include <sync.h>
 #include <timedata.h>
 #include <txmempool.h>
 #include <ui_interface.h>
 #include <univalue.h>
 #include <util/system.h>
 #include <validation.h>
 #include <validationinterface.h>
 
 #include <utility>
 
 namespace interfaces {
 namespace {
 
     bool FillBlock(const CBlockIndex *index, const FoundBlock &block,
                    UniqueLock<RecursiveMutex> &lock) {
         if (!index) {
             return false;
         }
         if (block.m_hash) {
             *block.m_hash = index->GetBlockHash();
         }
         if (block.m_height) {
             *block.m_height = index->nHeight;
         }
         if (block.m_time) {
             *block.m_time = index->GetBlockTime();
         }
         if (block.m_max_time) {
             *block.m_max_time = index->GetBlockTimeMax();
         }
         if (block.m_mtp_time) {
             *block.m_mtp_time = index->GetMedianTimePast();
         }
         if (block.m_data) {
             REVERSE_LOCK(lock);
             if (!ReadBlockFromDisk(*block.m_data, index,
                                    Params().GetConsensus())) {
                 block.m_data->SetNull();
             }
         }
         return true;
     }
 
     class NotificationsProxy : public CValidationInterface {
     public:
         explicit NotificationsProxy(
             std::shared_ptr<Chain::Notifications> notifications)
             : m_notifications(std::move(notifications)) {}
         virtual ~NotificationsProxy() = default;
         void TransactionAddedToMempool(const CTransactionRef &tx) override {
             m_notifications->TransactionAddedToMempool(tx);
         }
         void TransactionRemovedFromMempool(const CTransactionRef &tx) override {
             m_notifications->TransactionRemovedFromMempool(tx);
         }
         void BlockConnected(
             const std::shared_ptr<const CBlock> &block,
             const CBlockIndex *index,
             const std::vector<CTransactionRef> &tx_conflicted) override {
             m_notifications->BlockConnected(*block, tx_conflicted,
                                             index->nHeight);
         }
         void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
                                const CBlockIndex *index) override {
             m_notifications->BlockDisconnected(*block, index->nHeight);
         }
         void UpdatedBlockTip(const CBlockIndex *index,
                              const CBlockIndex *fork_index,
                              bool is_ibd) override {
             m_notifications->UpdatedBlockTip();
         }
         void ChainStateFlushed(const CBlockLocator &locator) override {
             m_notifications->ChainStateFlushed(locator);
         }
         std::shared_ptr<Chain::Notifications> m_notifications;
     };
 
     class NotificationsHandlerImpl : public Handler {
     public:
         explicit NotificationsHandlerImpl(
             std::shared_ptr<Chain::Notifications> notifications)
             : m_proxy(std::make_shared<NotificationsProxy>(
                   std::move(notifications))) {
             RegisterSharedValidationInterface(m_proxy);
         }
         ~NotificationsHandlerImpl() override { disconnect(); }
         void disconnect() override {
             if (m_proxy) {
                 UnregisterSharedValidationInterface(m_proxy);
                 m_proxy.reset();
             }
         }
         std::shared_ptr<NotificationsProxy> m_proxy;
     };
 
     class RpcHandlerImpl : public Handler {
     public:
         explicit RpcHandlerImpl(const CRPCCommand &command)
             : m_command(command), m_wrapped_command(&command) {
             m_command.actor = [this](Config &config,
                                      const JSONRPCRequest &request,
                                      UniValue &result, bool last_handler) {
                 if (!m_wrapped_command) {
                     return false;
                 }
                 try {
                     return m_wrapped_command->actor(config, request, result,
                                                     last_handler);
                 } catch (const UniValue &e) {
                     // If this is not the last handler and a wallet not found
                     // exception was thrown, return false so the next handler
                     // can try to handle the request. Otherwise, reraise the
                     // exception.
                     if (!last_handler) {
                         const UniValue &code = e["code"];
                         if (code.isNum() &&
                             code.get_int() == RPC_WALLET_NOT_FOUND) {
                             return false;
                         }
                     }
                     throw;
                 }
             };
             ::tableRPC.appendCommand(m_command.name, &m_command);
         }
 
         void disconnect() override final {
             if (m_wrapped_command) {
                 m_wrapped_command = nullptr;
                 ::tableRPC.removeCommand(m_command.name, &m_command);
             }
         }
 
         ~RpcHandlerImpl() override { disconnect(); }
 
         CRPCCommand m_command;
         const CRPCCommand *m_wrapped_command;
     };
 
     class ChainImpl : public Chain {
     public:
         explicit ChainImpl(NodeContext &node, const CChainParams &params)
             : m_node(node), m_params(params) {}
         Optional<int> getHeight() override {
             LOCK(::cs_main);
             int height = ::ChainActive().Height();
             if (height >= 0) {
                 return height;
             }
             return nullopt;
         }
         Optional<int> getBlockHeight(const BlockHash &hash) override {
             LOCK(::cs_main);
             CBlockIndex *block = LookupBlockIndex(hash);
             if (block && ::ChainActive().Contains(block)) {
                 return block->nHeight;
             }
             return nullopt;
         }
         BlockHash getBlockHash(int height) override {
             LOCK(::cs_main);
             CBlockIndex *block = ::ChainActive()[height];
             assert(block);
             return block->GetBlockHash();
         }
         bool haveBlockOnDisk(int height) override {
             LOCK(cs_main);
             CBlockIndex *block = ::ChainActive()[height];
             return block && (block->nStatus.hasData() != 0) && block->nTx > 0;
         }
         Optional<int>
         findFirstBlockWithTimeAndHeight(int64_t time, int height,
                                         BlockHash *hash) override {
             LOCK(cs_main);
             CBlockIndex *block =
                 ::ChainActive().FindEarliestAtLeast(time, height);
             if (block) {
                 if (hash) {
                     *hash = block->GetBlockHash();
                 }
                 return block->nHeight;
             }
             return nullopt;
         }
         CBlockLocator getTipLocator() override {
             LOCK(cs_main);
             return ::ChainActive().GetLocator();
         }
         bool contextualCheckTransactionForCurrentBlock(
             const CTransaction &tx, TxValidationState &state) override {
             LockAssertion lock(::cs_main);
             return ContextualCheckTransactionForCurrentBlock(
                 m_params.GetConsensus(), tx, state);
         }
         Optional<int> findLocatorFork(const CBlockLocator &locator) override {
             LOCK(cs_main);
             if (CBlockIndex *fork =
                     FindForkInGlobalIndex(::ChainActive(), locator)) {
                 return fork->nHeight;
             }
             return nullopt;
         }
         bool findBlock(const BlockHash &hash,
                        const FoundBlock &block) override {
             WAIT_LOCK(cs_main, lock);
             return FillBlock(LookupBlockIndex(hash), block, lock);
         }
         bool findFirstBlockWithTimeAndHeight(int64_t min_time, int min_height,
                                              const FoundBlock &block) override {
             WAIT_LOCK(cs_main, lock);
             return FillBlock(
                 ChainActive().FindEarliestAtLeast(min_time, min_height), block,
                 lock);
         }
         bool findNextBlock(const BlockHash &block_hash, int block_height,
                            const FoundBlock &next, bool *reorg) override {
             WAIT_LOCK(cs_main, lock);
             CBlockIndex *block = ChainActive()[block_height];
             if (block && block->GetBlockHash() != block_hash) {
                 block = nullptr;
             }
             if (reorg) {
                 *reorg = !block;
             }
             return FillBlock(block ? ChainActive()[block_height + 1] : nullptr,
                              next, lock);
         }
         bool findAncestorByHeight(const BlockHash &block_hash,
                                   int ancestor_height,
                                   const FoundBlock &ancestor_out) override {
             WAIT_LOCK(cs_main, lock);
             if (const CBlockIndex *block = LookupBlockIndex(block_hash)) {
                 if (const CBlockIndex *ancestor =
                         block->GetAncestor(ancestor_height)) {
                     return FillBlock(ancestor, ancestor_out, lock);
                 }
             }
             return FillBlock(nullptr, ancestor_out, lock);
         }
         bool findAncestorByHash(const BlockHash &block_hash,
                                 const BlockHash &ancestor_hash,
                                 const FoundBlock &ancestor_out) override {
             WAIT_LOCK(cs_main, lock);
             const CBlockIndex *block = LookupBlockIndex(block_hash);
             const CBlockIndex *ancestor = LookupBlockIndex(ancestor_hash);
             if (block && ancestor &&
                 block->GetAncestor(ancestor->nHeight) != ancestor) {
                 ancestor = nullptr;
             }
             return FillBlock(ancestor, ancestor_out, lock);
         }
         bool findCommonAncestor(const BlockHash &block_hash1,
                                 const BlockHash &block_hash2,
                                 const FoundBlock &ancestor_out,
                                 const FoundBlock &block1_out,
                                 const FoundBlock &block2_out) override {
             WAIT_LOCK(cs_main, lock);
             const CBlockIndex *block1 = LookupBlockIndex(block_hash1);
             const CBlockIndex *block2 = LookupBlockIndex(block_hash2);
             const CBlockIndex *ancestor =
                 block1 && block2 ? LastCommonAncestor(block1, block2) : nullptr;
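             // Using & instead of && below so that all three FillBlock calls
             // run and every output argument is populated, even when one of
             // the lookups fails.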
             return FillBlock(ancestor, ancestor_out, lock) &
                    FillBlock(block1, block1_out, lock) &
                    FillBlock(block2, block2_out, lock);
         }
         void findCoins(std::map<COutPoint, Coin> &coins) override {
             return FindCoins(coins);
         }
         double guessVerificationProgress(const BlockHash &block_hash) override {
             LOCK(cs_main);
             return GuessVerificationProgress(Params().TxData(),
                                              LookupBlockIndex(block_hash));
         }
         bool hasBlocks(const BlockHash &block_hash, int min_height,
                        Optional<int> max_height) override {
             // hasBlocks returns true if all ancestors of block_hash in
             // specified range have block data (are not pruned), false if any
             // ancestors in specified range are missing data.
             //
             // For simplicity and robustness, min_height and max_height are only
             // used to limit the range, and passing min_height that's too low or
             // max_height that's too high will not crash or change the result.
             LOCK(::cs_main);
             if (CBlockIndex *block = LookupBlockIndex(block_hash)) {
                 if (max_height && block->nHeight >= *max_height) {
                     block = block->GetAncestor(*max_height);
                 }
                 for (; block->nStatus.hasData(); block = block->pprev) {
                     // Check pprev to not segfault if min_height is too low
                     if (block->nHeight <= min_height || !block->pprev) {
                         return true;
                     }
                 }
             }
             return false;
         }
         bool hasDescendantsInMempool(const TxId &txid) override {
             LOCK(::g_mempool.cs);
             auto it = ::g_mempool.GetIter(txid);
             return it && (*it)->GetCountWithDescendants() > 1;
         }
         bool broadcastTransaction(const Config &config,
                                   const CTransactionRef &tx,
                                   std::string &err_string,
                                   const Amount &max_tx_fee,
                                   bool relay) override {
             const TransactionError err = BroadcastTransaction(
                 m_node, config, tx, err_string, max_tx_fee, relay,
                 /*wait_callback*/ false);
             // Chain clients only care about failures to accept the tx to the
             // mempool. Disregard non-mempool related failures. Note: this will
             // need to be updated if BroadcastTransaction() is updated to
             // return other non-mempool failures that Chain clients do not need
             // to know about.
             return err == TransactionError::OK;
         }
         void getTransactionAncestry(const TxId &txid, size_t &ancestors,
                                     size_t &descendants) override {
             ::g_mempool.GetTransactionAncestry(txid, ancestors, descendants);
         }
         void getPackageLimits(size_t &limit_ancestor_count,
                               size_t &limit_descendant_count) override {
             limit_ancestor_count = size_t(
                 std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount",
                                                   DEFAULT_ANCESTOR_LIMIT)));
             limit_descendant_count = size_t(
                 std::max<int64_t>(1, gArgs.GetArg("-limitdescendantcount",
                                                   DEFAULT_DESCENDANT_LIMIT)));
         }
         bool checkChainLimits(const CTransactionRef &tx) override {
             LockPoints lp;
             CTxMemPoolEntry entry(tx, Amount(), 0, 0, false, 0, lp);
             CTxMemPool::setEntries ancestors;
             auto limit_ancestor_count =
                 gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
             auto limit_ancestor_size =
                 gArgs.GetArg("-limitancestorsize",
                              DEFAULT_ANCESTOR_SIZE_LIMIT) *
                 1000;
             auto limit_descendant_count =
                 gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
             auto limit_descendant_size =
                 gArgs.GetArg("-limitdescendantsize",
                              DEFAULT_DESCENDANT_SIZE_LIMIT) *
                 1000;
             std::string unused_error_string;
             LOCK(::g_mempool.cs);
             return ::g_mempool.CalculateMemPoolAncestors(
                 entry, ancestors, limit_ancestor_count, limit_ancestor_size,
                 limit_descendant_count, limit_descendant_size,
                 unused_error_string);
         }
         CFeeRate estimateFee() const override {
             return ::g_mempool.estimateFee();
         }
         CFeeRate relayMinFee() override { return ::minRelayTxFee; }
         CFeeRate relayDustFee() override { return ::dustRelayFee; }
         bool havePruned() override {
             LOCK(cs_main);
             return ::fHavePruned;
         }
         bool isReadyToBroadcast() override {
             return !::fImporting && !::fReindex && !isInitialBlockDownload();
         }
         bool isInitialBlockDownload() override {
             return ::ChainstateActive().IsInitialBlockDownload();
         }
         bool shutdownRequested() override { return ShutdownRequested(); }
         int64_t getAdjustedTime() override { return GetAdjustedTime(); }
         void initMessage(const std::string &message) override {
             ::uiInterface.InitMessage(message);
         }
         void initWarning(const std::string &message) override {
             InitWarning(message);
         }
         void initError(const std::string &message) override {
             InitError(message);
         }
         void showProgress(const std::string &title, int progress,
                           bool resume_possible) override {
             ::uiInterface.ShowProgress(title, progress, resume_possible);
         }
         std::unique_ptr<Handler> handleNotifications(
             std::shared_ptr<Notifications> notifications) override {
             return std::make_unique<NotificationsHandlerImpl>(
                 std::move(notifications));
         }
         void
         waitForNotificationsIfTipChanged(const BlockHash &old_tip) override {
             if (!old_tip.IsNull()) {
                 LOCK(::cs_main);
                 if (old_tip == ::ChainActive().Tip()->GetBlockHash()) {
                     return;
                 }
             }
             SyncWithValidationInterfaceQueue();
         }
 
         std::unique_ptr<Handler>
         handleRpc(const CRPCCommand &command) override {
             return std::make_unique<RpcHandlerImpl>(command);
         }
         bool rpcEnableDeprecated(const std::string &method) override {
             return IsDeprecatedRPCEnabled(gArgs, method);
         }
         void rpcRunLater(const std::string &name, std::function<void()> fn,
                          int64_t seconds) override {
             RPCRunLater(name, std::move(fn), seconds);
         }
         int rpcSerializationFlags() override { return RPCSerializationFlags(); }
         void requestMempoolTransactions(Notifications &notifications) override {
             LOCK2(::cs_main, ::g_mempool.cs);
             for (const CTxMemPoolEntry &entry : ::g_mempool.mapTx) {
                 notifications.TransactionAddedToMempool(entry.GetSharedTx());
             }
         }
         NodeContext &m_node;
         const CChainParams &m_params;
     };
 
 } // namespace
 
 std::unique_ptr<Chain> MakeChain(NodeContext &node,
                                  const CChainParams &params) {
     return std::make_unique<ChainImpl>(node, params);
 }
 
 } // namespace interfaces
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index d252dd6aa..f9d4520af 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,5108 +1,5109 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <net_processing.h>
 
 #include <addrman.h>
 #include <avalanche/processor.h>
 #include <banman.h>
+#include <blockdb.h>
 #include <blockencodings.h>
 #include <blockvalidity.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <hash.h>
 #include <merkleblock.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <random.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <tinyformat.h>
 #include <txmempool.h>
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/validation.h>
 #include <validation.h>
 
 #include <memory>
 
 #if defined(NDEBUG)
 #error "Bitcoin cannot be compiled without assertions."
 #endif
 
 /** Expiration time for orphan transactions in seconds */
 static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
 /** Minimum time between orphan transactions expire time checks in seconds */
 static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
 /** How long to cache transactions in mapRelay for normal relay */
 static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60};
 /**
  * Headers download timeout expressed in microseconds.
  * Timeout = base + per_header * (expected number of headers)
  */
 // 15 minutes
 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000;
 // 1ms/header
 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000;
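 // Illustrative example: a peer expected to deliver ~600,000 headers is
 // allowed roughly 15 min + 600,000 * 1 ms = 25 minutes before the timeout
 // fires.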
 /**
  * Protect at least this many outbound peers from disconnection due to
  * slow/behind headers chain.
  */
 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
 /**
  * Timeout for (unprotected) outbound peers to sync to our chainwork, in
  * seconds.
  */
 // 20 minutes
 static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;
 /** How frequently to check for stale tips, in seconds */
 // 10 minutes
 static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60;
 /**
  * How frequently to check for extra outbound peers and disconnect, in seconds.
  */
 static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
 /**
  * Minimum time an outbound-peer-eviction candidate must be connected for, in
  * order to evict, in seconds.
  */
 static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
 /** SHA256("main address relay")[0:8] */
 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 /// Age after which a stale block will no longer be served if requested as
 /// protection against fingerprinting. Set to one month, denominated in seconds.
 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
 /// Age after which a block is considered historical for purposes of rate
 /// limiting block relay. Set to one week, denominated in seconds.
 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
 /** Maximum number of in-flight transactions from a peer */
 static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
 /** Maximum number of announced transactions from a peer */
 static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
 /** How many microseconds to delay requesting transactions from inbound peers */
 static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{
     std::chrono::seconds{2}};
 /**
  * How long to wait (in microseconds) before downloading a transaction from an
  * additional peer.
  */
 static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{
     std::chrono::seconds{60}};
 /**
  * Maximum delay (in microseconds) for transaction requests to avoid biasing
  * some peers over others.
  */
 static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{
     std::chrono::seconds{2}};
 /**
  * How long to wait (in microseconds) before expiring an in-flight getdata
  * request to a peer.
  */
 static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{
     GETDATA_TX_INTERVAL * 10};
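 // i.e. 10 * 60 seconds = 10 minutes.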
 static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
               "To preserve security, MAX_GETDATA_RANDOM_DELAY should not "
               "exceed INBOUND_PEER_TX_DELAY");
 /**
  * Limit to avoid sending big packets. Not used in processing incoming GETDATA
  * for compatibility.
  */
 static const unsigned int MAX_GETDATA_SZ = 1000;
 
 /// How many non-standard orphans we consider from a node before ignoring it.
 static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE = 5;
 
 struct COrphanTx {
     // When modifying, adapt the copy of this definition in tests/DoS_tests.
     CTransactionRef tx;
     NodeId fromPeer;
     int64_t nTimeExpire;
     size_t list_pos;
 };
 
 RecursiveMutex g_cs_orphans;
 std::map<TxId, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
 
 void EraseOrphansFor(NodeId peer);
 
 /**
  * Average delay between local address broadcasts in seconds.
  */
 static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL =
     24 * 60 * 60;
 /**
  * Average delay between peer address broadcasts in seconds.
  */
 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
 /**
  * Average delay between trickled inventory transmissions in seconds.
  * Blocks and whitelisted receivers bypass this, outbound peers get half this
  * delay.
  */
 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
 /**
  * Maximum number of inventory items to send per transmission.
  * Limits the impact of low-fee transaction floods.
  */
 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
     7 * INVENTORY_BROADCAST_INTERVAL;
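 // At the 5 second trickle interval above, this works out to roughly
 // 7 inventory items per second per MB of block size.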
 /**
  * Average delay between feefilter broadcasts in seconds.
  */
 static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
 /**
  * Maximum feefilter broadcast delay after significant change.
  */
 static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
 
 // Internal stuff
 namespace {
 /** Number of nodes with fSyncStarted. */
 int nSyncStarted GUARDED_BY(cs_main) = 0;
 
 /**
  * Sources of received blocks, saved to be able to send them reject messages or
  * ban them when processing happens afterwards.
  * Set mapBlockSource[hash].second to false if the node should not be punished
  * if the block is invalid.
  */
 std::map<BlockHash, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
 
 /**
  * Filter for transactions that were recently rejected by AcceptToMemoryPool.
  * These are not rerequested until the chain tip changes, at which point the
  * entire filter is reset.
  *
  * Without this filter we'd be re-requesting txs from each of our peers,
  * increasing bandwidth consumption considerably. For instance, with 100 peers,
  * half of which relay a tx we don't accept, that might be a 50x bandwidth
  * increase. A flooding attacker attempting to roll over the filter using
  * minimum-sized, 60-byte transactions might manage to send 1000/sec if we have
  * fast peers, so we pick 120,000 to give our peers a two minute window to send
  * invs to us.
  *
  * Decreasing the false positive rate is fairly cheap, so we pick one in a
  * million to make it highly unlikely for users to have issues with this filter.
  *
  * Memory used: 1.3 MB
  */
 std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
 uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
 
 /**
  * Blocks that are in flight, and that are in the queue to be downloaded.
  */
 struct QueuedBlock {
     BlockHash hash;
     //! Optional.
     const CBlockIndex *pindex;
     //! Whether this block has validated headers at the time of request.
     bool fValidatedHeaders;
     //! Optional, used for CMPCTBLOCK downloads
     std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
 };
 std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
     mapBlocksInFlight GUARDED_BY(cs_main);
 
 /** Stack of nodes which we have set to announce using compact blocks */
 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
 
 /** Number of preferable block download peers. */
 int nPreferredDownload GUARDED_BY(cs_main) = 0;
 
 /** Number of peers from which we're downloading blocks. */
 int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
 
 /** Number of outbound peers with m_chain_sync.m_protect. */
 int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 
 /** When our tip was last updated. */
 std::atomic<int64_t> g_last_tip_update(0);
 
 /** Relay map. */
 typedef std::map<uint256, CTransactionRef> MapRelay;
 MapRelay mapRelay GUARDED_BY(cs_main);
 /**
  * Expiration-time ordered list of (expire time, relay map entry) pairs,
  * protected by cs_main.
  */
 std::deque<std::pair<int64_t, MapRelay::iterator>>
     vRelayExpiration GUARDED_BY(cs_main);
 
 struct IteratorComparator {
     template <typename I> bool operator()(const I &a, const I &b) const {
         return &(*a) < &(*b);
     }
 };
 std::map<COutPoint,
          std::set<std::map<TxId, COrphanTx>::iterator, IteratorComparator>>
     mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
 
 //! For random eviction
 std::vector<std::map<TxId, COrphanTx>::iterator>
     g_orphan_list GUARDED_BY(g_cs_orphans);
 
 static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
 static std::vector<std::pair<TxHash, CTransactionRef>>
     vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
 } // namespace
 
 namespace {
 struct CBlockReject {
     uint8_t chRejectCode;
     std::string strRejectReason;
     uint256 hashBlock;
 };
 
 /**
  * Maintain validation-specific state about nodes, protected by cs_main, instead
  * of by CNode's own locks. This simplifies asynchronous operation, where
  * processing of incoming data is done after the ProcessMessage call returns,
  * and we're no longer holding the node's locks.
  */
 struct CNodeState {
     //! The peer's address
     const CService address;
     //! Whether we have a fully established connection.
     bool fCurrentlyConnected;
     //! Accumulated misbehaviour score for this peer.
     int nMisbehavior;
     //! Whether this peer should be disconnected and marked as discouraged
     //! (unless whitelisted with noban).
     bool m_should_discourage;
     //! String name of this peer (debugging/logging purposes).
     const std::string name;
     //! List of asynchronously-determined block rejections to notify this peer
     //! about.
     std::vector<CBlockReject> rejects;
     //! The best known block we know this peer has announced.
     const CBlockIndex *pindexBestKnownBlock;
     //! The hash of the last unknown block this peer has announced.
     BlockHash hashLastUnknownBlock;
     //! The last full block we both have.
     const CBlockIndex *pindexLastCommonBlock;
     //! The best header we have sent our peer.
     const CBlockIndex *pindexBestHeaderSent;
     //! Length of the current streak of unconnecting headers announcements
     int nUnconnectingHeaders;
     //! Whether we've started headers synchronization with this peer.
     bool fSyncStarted;
     //! When to potentially disconnect peer for stalling headers download
     int64_t nHeadersSyncTimeout;
     //! Since when we're stalling block download progress (in microseconds), or
     //! 0.
     int64_t nStallingSince;
     std::list<QueuedBlock> vBlocksInFlight;
     //! When the first entry in vBlocksInFlight started downloading. Don't care
     //! when vBlocksInFlight is empty.
     int64_t nDownloadingSince;
     int nBlocksInFlight;
     int nBlocksInFlightValidHeaders;
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload;
     //! Whether this peer wants invs or headers (when possible) for block
     //! announcements.
     bool fPreferHeaders;
     //! Whether this peer wants invs or cmpctblocks (when possible) for block
     //! announcements.
     bool fPreferHeaderAndIDs;
     /**
      * Whether this peer will send us cmpctblocks if we request them.
      * This is not used to gate request logic, as we really only care about
      * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the
      * version of compact blocks we send.
      */
     bool fProvidesHeaderAndIDs;
     /**
      * If we've announced NODE_WITNESS to this peer: whether the peer sends
      * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends
      * non-witnesses in cmpctblocks/blocktxns.
      */
     bool fSupportsDesiredCmpctVersion;
 
     /**
      * State used to enforce CHAIN_SYNC_TIMEOUT
      * Only in effect for outbound, non-manual, full-relay connections, with
      * m_protect == false
      * Algorithm: if a peer's best known block has less work than our tip, set
      * a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
      *   - If at timeout their best known block now has more work than our tip
      * when the timeout was set, then either reset the timeout or clear it
      * (after comparing against our current tip's work)
      *   - If at timeout their best known block still has less work than our tip
      * did when the timeout was set, then send a getheaders message, and set a
       * shorter timeout, HEADERS_RESPONSE_TIME seconds in the future. If their best
      * known block is still behind when that new timeout is reached, disconnect.
      */
     struct ChainSyncTimeoutState {
         //! A timeout used for checking whether our peer has sufficiently
         //! synced.
         int64_t m_timeout;
         //! A header with the work we require on our peer's chain.
         const CBlockIndex *m_work_header;
         //! After timeout is reached, set to true after sending getheaders.
         bool m_sent_getheaders;
         //! Whether this peer is protected from disconnection due to a bad/slow
         //! chain.
         bool m_protect;
     };
 
     ChainSyncTimeoutState m_chain_sync;
 
     //! Time of last new block announcement
     int64_t m_last_block_announcement;
 
     /*
      * State associated with transaction download.
      *
      * Tx download algorithm:
      *
      *   When inv comes in, queue up (process_time, txid) inside the peer's
      *   CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
      *   isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
      *
      *   The process_time for a transaction is set to nNow for outbound peers,
      *   nNow + 2 seconds for inbound peers. This is the time at which we'll
      *   consider trying to request the transaction from the peer in
      *   SendMessages(). The delay for inbound peers is to allow outbound peers
      *   a chance to announce before we request from inbound peers, to prevent
      *   an adversary from using inbound connections to blind us to a
      *   transaction (InvBlock).
      *
      *   When we call SendMessages() for a given peer,
      *   we will loop over the transactions in m_tx_process_time, looking
      *   at the transactions whose process_time <= nNow. We'll request each
      *   such transaction that we don't have already and that hasn't been
      *   requested from another peer recently, up until we hit the
      *   MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
      *   g_already_asked_for for each requested txid, storing the time of the
      *   GETDATA request. We use g_already_asked_for to coordinate transaction
      *   requests amongst our peers.
      *
      *   For transactions that we still need but we have already recently
      *   requested from some other peer, we'll reinsert (process_time, txid)
      *   back into the peer's m_tx_process_time at the point in the future at
      *   which the most recent GETDATA request would time out (ie
      *   GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
      *   We add an additional delay for inbound peers, again to prefer
      *   attempting download from outbound peers first.
      *   We also add an extra small random delay up to 2 seconds
      *   to avoid biasing some peers over others. (e.g., due to fixed ordering
      *   of peer processing in ThreadMessageHandler).
      *
      *   When we receive a transaction from a peer, we remove the txid from the
      *   peer's m_tx_in_flight set and from their recently announced set
      *   (m_tx_announced).  We also clear g_already_asked_for for that entry, so
      *   that if somehow the transaction is not accepted but also not added to
      *   the reject filter, then we will eventually redownload from other
      *   peers.
      */
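     // Illustrative timeline using the constants above: an inbound peer
     // announces txid T at time t. If no peer has been asked for T yet,
     // process_time = t + INBOUND_PEER_TX_DELAY (2 s). If another peer was
     // already sent a GETDATA for T at time r, the retry is scheduled no
     // earlier than r + GETDATA_TX_INTERVAL (60 s) + a random delay of up to
     // 2 s, plus the 2 s inbound delay again.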
     struct TxDownloadState {
         /**
          * Track when to attempt download of announced transactions (process
          * time in micros -> txid)
          */
         std::multimap<std::chrono::microseconds, TxId> m_tx_process_time;
 
         //! Store all the transactions a peer has recently announced
         std::set<TxId> m_tx_announced;
 
         //! Store transactions which were requested by us, with timestamp
         std::map<TxId, std::chrono::microseconds> m_tx_in_flight;
 
         //! Periodically check for stuck getdata requests
         std::chrono::microseconds m_check_expiry_timer{0};
     };
 
     TxDownloadState m_tx_download;
 
     struct AvalancheState {
         std::chrono::time_point<std::chrono::steady_clock> last_poll;
     };
 
     AvalancheState m_avalanche_state;
 
     //! Whether this peer is an inbound connection
     bool m_is_inbound;
 
     //! Whether this peer is a manual connection
     bool m_is_manual_connection;
 
     CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound,
                bool is_manual)
         : address(addrIn), name(std::move(addrNameIn)),
           m_is_inbound(is_inbound), m_is_manual_connection(is_manual) {
         fCurrentlyConnected = false;
         nMisbehavior = 0;
         m_should_discourage = false;
         pindexBestKnownBlock = nullptr;
         hashLastUnknownBlock = BlockHash();
         pindexLastCommonBlock = nullptr;
         pindexBestHeaderSent = nullptr;
         nUnconnectingHeaders = 0;
         fSyncStarted = false;
         nHeadersSyncTimeout = 0;
         nStallingSince = 0;
         nDownloadingSince = 0;
         nBlocksInFlight = 0;
         nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
         fPreferHeaders = false;
         fPreferHeaderAndIDs = false;
         fProvidesHeaderAndIDs = false;
         fSupportsDesiredCmpctVersion = false;
         m_chain_sync = {0, nullptr, false, false};
         m_last_block_announcement = 0;
     }
 };
 
 // Keeps track of the time (in microseconds) at which each transaction was
 // last requested.
 limitedmap<TxId, std::chrono::microseconds>
     g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);
 
 /** Map maintaining per-node state. */
 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
 
 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
     if (it == mapNodeState.end()) {
         return nullptr;
     }
 
     return &it->second;
 }
 
 static void UpdatePreferredDownload(CNode *node, CNodeState *state)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
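     // Subtract the old flag from the aggregate counter before recomputing it,
     // then add the new value back below.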
     nPreferredDownload -= state->fPreferredDownload;
 
     // Whether this node should be marked as a preferred download node.
     state->fPreferredDownload =
         (!node->fInbound || node->HasPermission(PF_NOBAN)) && !node->fOneShot &&
         !node->fClient;
 
     nPreferredDownload += state->fPreferredDownload;
 }
 
 static void PushNodeVersion(const Config &config, CNode *pnode,
                             CConnman *connman, int64_t nTime) {
     ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
     uint64_t nonce = pnode->GetLocalNonce();
     int nNodeStartingHeight = pnode->GetMyStartingHeight();
     NodeId nodeid = pnode->GetId();
     CAddress addr = pnode->addr;
 
     CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr)
                             ? addr
                             : CAddress(CService(), addr.nServices));
     CAddress addrMe = CAddress(CService(), nLocalNodeServices);
 
     connman->PushMessage(
         pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                    .Make(NetMsgType::VERSION, PROTOCOL_VERSION,
                          uint64_t(nLocalNodeServices), nTime, addrYou, addrMe,
                          nonce, userAgent(config), nNodeStartingHeight,
                          ::g_relay_txes && pnode->m_tx_relay != nullptr));
 
     if (fLogIPs) {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, us=%s, them=%s, "
                  "peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
                  addrYou.ToString(), nodeid);
     } else {
         LogPrint(
             BCLog::NET,
             "send version message: version %d, blocks=%d, us=%s, peer=%d\n",
             PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
     }
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
 
 // Returns a bool indicating whether we requested this block.
 // Also used if a block was /not/ received and timed out or started with another
 // peer.
 static bool MarkBlockAsReceived(const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
         assert(state != nullptr);
         state->nBlocksInFlightValidHeaders -=
             itInFlight->second.second->fValidatedHeaders;
         if (state->nBlocksInFlightValidHeaders == 0 &&
             itInFlight->second.second->fValidatedHeaders) {
             // Last validated block on the queue was received.
             nPeersWithValidatedDownloads--;
         }
         if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
             // First block on the queue was received, update the start download
             // time for the next one
             state->nDownloadingSince =
                 std::max(state->nDownloadingSince, GetTimeMicros());
         }
         state->vBlocksInFlight.erase(itInFlight->second.second);
         state->nBlocksInFlight--;
         state->nStallingSince = 0;
         mapBlocksInFlight.erase(itInFlight);
         return true;
     }
 
     return false;
 }
 
 // Returns false, still setting pit, if the block was already in flight from
 // the same peer.
 // pit will only be valid as long as the same cs_main lock is being held.
 static bool
 MarkBlockAsInFlight(const Config &config, NodeId nodeid, const BlockHash &hash,
                     const Consensus::Params &consensusParams,
                     const CBlockIndex *pindex = nullptr,
                     std::list<QueuedBlock>::iterator **pit = nullptr)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Short-circuit most stuff in case it is from the same node.
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end() &&
         itInFlight->second.first == nodeid) {
         if (pit) {
             *pit = &itInFlight->second.second;
         }
         return false;
     }
 
     // Make sure it's not listed somewhere already.
     MarkBlockAsReceived(hash);
 
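     // A PartiallyDownloadedBlock is only pre-allocated when the caller passes
     // pit, i.e. on the compact block (CMPCTBLOCK) download path.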
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
         state->vBlocksInFlight.end(),
         {hash, pindex, pindex != nullptr,
          std::unique_ptr<PartiallyDownloadedBlock>(
              pit ? new PartiallyDownloadedBlock(config, &g_mempool)
                  : nullptr)});
     state->nBlocksInFlight++;
     state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
     if (state->nBlocksInFlight == 1) {
         // We're starting a block download (batch) from this peer.
         state->nDownloadingSince = GetTimeMicros();
     }
 
     if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
         nPeersWithValidatedDownloads++;
     }
 
     itInFlight = mapBlocksInFlight
                      .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                      .first;
 
     if (pit) {
         *pit = &itInFlight->second.second;
     }
 
     return true;
 }
 
 /** Check whether the last unknown block a peer advertised is not yet known. */
 static void ProcessBlockAvailability(NodeId nodeid)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (!state->hashLastUnknownBlock.IsNull()) {
         const CBlockIndex *pindex =
             LookupBlockIndex(state->hashLastUnknownBlock);
         if (pindex && pindex->nChainWork > 0) {
             if (state->pindexBestKnownBlock == nullptr ||
                 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                 state->pindexBestKnownBlock = pindex;
             }
             state->hashLastUnknownBlock.SetNull();
         }
     }
 }
 
 /** Update tracking information about which blocks a peer is assumed to have. */
 static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     ProcessBlockAvailability(nodeid);
 
     const CBlockIndex *pindex = LookupBlockIndex(hash);
     if (pindex && pindex->nChainWork > 0) {
         // An actually better block was announced.
         if (state->pindexBestKnownBlock == nullptr ||
             pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
             state->pindexBestKnownBlock = pindex;
         }
     } else {
         // An unknown block was announced; just assume that the latest one is
         // the best one.
         state->hashLastUnknownBlock = hash;
     }
 }
 
 /**
  * When a peer sends us a valid block, instruct it to announce blocks to us
  * using CMPCTBLOCK if possible by adding its nodeid to the end of
  * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
  * removing the first element if necessary.
  */
 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid,
                                                  CConnman *connman)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     CNodeState *nodestate = State(nodeid);
     if (!nodestate) {
         LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
         return;
     }
     if (!nodestate->fProvidesHeaderAndIDs) {
         return;
     }
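     // If the peer is already in the list, move it to the back so it becomes
     // the most recently confirmed announcer and is the last to be rotated out.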
     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
          it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
         if (*it == nodeid) {
             lNodesAnnouncingHeaderAndIDs.erase(it);
             lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
             return;
         }
     }
     connman->ForNode(nodeid, [&connman](CNode *pfrom) {
         AssertLockHeld(cs_main);
         uint64_t nCMPCTBLOCKVersion = 1;
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
             connman->ForNode(
                 lNodesAnnouncingHeaderAndIDs.front(),
                 [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) {
                     AssertLockHeld(cs_main);
                     connman->PushMessage(
                         pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion())
                                        .Make(NetMsgType::SENDCMPCT,
                                              /*fAnnounceUsingCMPCTBLOCK=*/false,
                                              nCMPCTBLOCKVersion));
                     return true;
                 });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion())
                                         .Make(NetMsgType::SENDCMPCT,
                                               /*fAnnounceUsingCMPCTBLOCK=*/true,
                                               nCMPCTBLOCKVersion));
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
         return true;
     });
 }
 
 static bool TipMayBeStale(const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     if (g_last_tip_update == 0) {
         g_last_tip_update = GetTime();
     }
     return g_last_tip_update <
                GetTime() - consensusParams.nPowTargetSpacing * 3 &&
            mapBlocksInFlight.empty();
 }
 
 static bool CanDirectFetch(const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     return ::ChainActive().Tip()->GetBlockTime() >
            GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
 }
 
 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (state->pindexBestKnownBlock &&
         pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
         return true;
     }
     if (state->pindexBestHeaderSent &&
         pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
         return true;
     }
     return false;
 }
 
 /**
  * Update pindexLastCommonBlock and add not-in-flight missing successors to
  * vBlocks, until it has at most count entries.
  */
 static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                      std::vector<const CBlockIndex *> &vBlocks,
                                      NodeId &nodeStaller,
                                      const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (count == 0) {
         return;
     }
 
     vBlocks.reserve(vBlocks.size() + count);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
 
     if (state->pindexBestKnownBlock == nullptr ||
         state->pindexBestKnownBlock->nChainWork <
             ::ChainActive().Tip()->nChainWork ||
         state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
         // This peer has nothing interesting.
         return;
     }
 
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking
         // point. Guessing wrong in either direction is not a problem.
         state->pindexLastCommonBlock = ::ChainActive()[std::min(
             state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
     }
 
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an
     // ancestor of its current tip anymore. Go back enough to fix that.
     state->pindexLastCommonBlock = LastCommonAncestor(
         state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
         return;
     }
 
     std::vector<const CBlockIndex *> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more
     // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
     // common with this peer. The +1 is so we can detect stalling, namely if we
     // would be able to download that next block if the window were 1 larger.
     int nWindowEnd =
         state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
     int nMaxHeight =
         std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
     NodeId waitingfor = -1;
     while (pindexWalk->nHeight < nMaxHeight) {
         // Read up to 128 (or more, if more blocks than that are needed)
         // successors of pindexWalk (towards pindexBestKnownBlock) into
         // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
         // expensive as iterating over ~100 CBlockIndex* entries anyway.
         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                 std::max<int>(count - vBlocks.size(), 128));
         vToFetch.resize(nToFetch);
         pindexWalk = state->pindexBestKnownBlock->GetAncestor(
             pindexWalk->nHeight + nToFetch);
         vToFetch[nToFetch - 1] = pindexWalk;
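         // Fill the remaining entries backwards via pprev so vToFetch ends up
         // ordered by increasing height.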
         for (unsigned int i = nToFetch - 1; i > 0; i--) {
             vToFetch[i - 1] = vToFetch[i]->pprev;
         }
 
         // Iterate over those blocks in vToFetch (in forward direction), adding
         // the ones that are not yet downloaded and not in flight to vBlocks. In
         // the meantime, update pindexLastCommonBlock as long as all ancestors
         // are already downloaded, or if it's already part of our chain (and
         // therefore don't need it even if pruned).
         for (const CBlockIndex *pindex : vToFetch) {
             if (!pindex->IsValid(BlockValidity::TREE)) {
                 // We consider the chain that this peer is on invalid.
                 return;
             }
             if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) {
                 if (pindex->HaveTxsDownloaded()) {
                     state->pindexLastCommonBlock = pindex;
                 }
             } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                 // The block is not already downloaded, and not yet in flight.
                 if (pindex->nHeight > nWindowEnd) {
                     // We reached the end of the window.
                     if (vBlocks.size() == 0 && waitingfor != nodeid) {
                         // We aren't able to fetch anything, but we would be if
                         // the download window was one larger.
                         nodeStaller = waitingfor;
                     }
                     return;
                 }
                 vBlocks.push_back(pindex);
                 if (vBlocks.size() == count) {
                     return;
                 }
             } else if (waitingfor == -1) {
                 // This is the first already-in-flight block.
                 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
             }
         }
     }
 }
 
 void EraseTxRequest(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     g_already_asked_for.erase(txid);
 }
 
 std::chrono::microseconds GetTxRequestTime(const TxId &txid)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     auto it = g_already_asked_for.find(txid);
     if (it != g_already_asked_for.end()) {
         return it->second;
     }
     return {};
 }
 
 void UpdateTxRequestTime(const TxId &txid,
                          std::chrono::microseconds request_time)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     auto it = g_already_asked_for.find(txid);
     if (it == g_already_asked_for.end()) {
         g_already_asked_for.insert(std::make_pair(txid, request_time));
     } else {
         g_already_asked_for.update(it, request_time);
     }
 }
 
 std::chrono::microseconds
 CalculateTxGetDataTime(const TxId &txid, std::chrono::microseconds current_time,
                        bool use_inbound_delay)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::chrono::microseconds process_time;
     const auto last_request_time = GetTxRequestTime(txid);
     // First time requesting this tx
     if (last_request_time.count() == 0) {
         process_time = current_time;
     } else {
         // Randomize the delay to avoid biasing some peers over others (such as
         // due to fixed ordering of peer processing in ThreadMessageHandler)
         process_time = last_request_time + GETDATA_TX_INTERVAL +
                        GetRandMicros(MAX_GETDATA_RANDOM_DELAY);
     }
 
     // We delay processing announcements from inbound peers
     if (use_inbound_delay) {
         process_time += INBOUND_PEER_TX_DELAY;
     }
 
     return process_time;
 }
 
 void RequestTx(CNodeState *state, const TxId &txid,
                std::chrono::microseconds current_time)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState::TxDownloadState &peer_download_state = state->m_tx_download;
     if (peer_download_state.m_tx_announced.size() >=
             MAX_PEER_TX_ANNOUNCEMENTS ||
         peer_download_state.m_tx_process_time.size() >=
             MAX_PEER_TX_ANNOUNCEMENTS ||
         peer_download_state.m_tx_announced.count(txid)) {
         // Too many queued announcements from this peer, or we already have
         // this announcement
         return;
     }
     peer_download_state.m_tx_announced.insert(txid);
 
     // Calculate the time to try requesting this transaction. Use
     // fPreferredDownload as a proxy for outbound peers.
     const auto process_time =
         CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload);
 
     peer_download_state.m_tx_process_time.emplace(process_time, txid);
 }
 
 } // namespace
 
 // This function is used for testing the stale tip eviction logic, see
 // denialofservice_tests.cpp
 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) {
     LOCK(cs_main);
     CNodeState *state = State(node);
     if (state) {
         state->m_last_block_announcement = time_in_seconds;
     }
 }
 
 // Returns true for outbound peers, excluding manual connections, feelers, and
 // one-shots.
 static bool IsOutboundDisconnectionCandidate(const CNode *node) {
     return !(node->fInbound || node->m_manual_connection || node->fFeeler ||
              node->fOneShot);
 }
 
 void PeerLogicValidation::InitializeNode(const Config &config, CNode *pnode) {
     CAddress addr = pnode->addr;
     std::string addrName = pnode->GetAddrName();
     NodeId nodeid = pnode->GetId();
     {
         LOCK(cs_main);
         mapNodeState.emplace_hint(
             mapNodeState.end(), std::piecewise_construct,
             std::forward_as_tuple(nodeid),
             std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound,
                                   pnode->m_manual_connection));
     }
     if (!pnode->fInbound) {
         PushNodeVersion(config, pnode, connman, GetTime());
     }
 }
 
 void PeerLogicValidation::FinalizeNode(const Config &config, NodeId nodeid,
                                        bool &fUpdateConnectionTime) {
     fUpdateConnectionTime = false;
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (state->fSyncStarted) {
         nSyncStarted--;
     }
 
     if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
         fUpdateConnectionTime = true;
     }
 
     for (const QueuedBlock &entry : state->vBlocksInFlight) {
         mapBlocksInFlight.erase(entry.hash);
     }
     EraseOrphansFor(nodeid);
     nPreferredDownload -= state->fPreferredDownload;
     nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
     assert(nPeersWithValidatedDownloads >= 0);
     g_outbound_peers_with_protect_from_disconnect -=
         state->m_chain_sync.m_protect;
     assert(g_outbound_peers_with_protect_from_disconnect >= 0);
 
     mapNodeState.erase(nodeid);
 
     if (mapNodeState.empty()) {
         // Do a consistency check after the last peer is removed.
         assert(mapBlocksInFlight.empty());
         assert(nPreferredDownload == 0);
         assert(nPeersWithValidatedDownloads == 0);
         assert(g_outbound_peers_with_protect_from_disconnect == 0);
     }
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
 
 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
     if (state == nullptr) {
         return false;
     }
     stats.nMisbehavior = state->nMisbehavior;
     stats.nSyncHeight =
         state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
     stats.nCommonHeight = state->pindexLastCommonBlock
                               ? state->pindexLastCommonBlock->nHeight
                               : -1;
     for (const QueuedBlock &queue : state->vBlocksInFlight) {
         if (queue.pindex) {
             stats.vHeightInFlight.push_back(queue.pindex->nHeight);
         }
     }
     return true;
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // mapOrphanTransactions
 //
 
 static void AddToCompactExtraTransactions(const CTransactionRef &tx)
     EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn",
                                         DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
     if (max_extra_txn <= 0) {
         return;
     }
 
     if (!vExtraTxnForCompact.size()) {
         vExtraTxnForCompact.resize(max_extra_txn);
     }
 
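     // vExtraTxnForCompact is used as a fixed-size ring buffer: overwrite the
     // slot at the current index, then advance (and wrap) the index.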
     vExtraTxnForCompact[vExtraTxnForCompactIt] =
         std::make_pair(tx->GetHash(), tx);
     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
 }
 
 bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
     EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     const TxId &txid = tx->GetId();
     if (mapOrphanTransactions.count(txid)) {
         return false;
     }
 
     // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
     // attack. If a peer has a legitimate large transaction with a missing
     // parent then we assume it will rebroadcast it later, after the parent
     // transaction(s) have been mined or received.
     // 100 orphans, each of which is at most 100,000 bytes, add up to at most
     // 10 megabytes of orphans, plus somewhat more for the by-prev index (in
     // the worst case):
     unsigned int sz = tx->GetTotalSize();
     if (sz > MAX_STANDARD_TX_SIZE) {
         LogPrint(BCLog::MEMPOOL,
                  "ignoring large orphan tx (size: %u, hash: %s)\n", sz,
                  txid.ToString());
         return false;
     }
 
     auto ret = mapOrphanTransactions.emplace(
         txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME,
                         g_orphan_list.size()});
     assert(ret.second);
     g_orphan_list.push_back(ret.first);
     for (const CTxIn &txin : tx->vin) {
         mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
     }
 
     AddToCompactExtraTransactions(tx);
 
     LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n",
              txid.ToString(), mapOrphanTransactions.size(),
              mapOrphanTransactionsByPrev.size());
     return true;
 }
 
 static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     const auto it = mapOrphanTransactions.find(id);
     if (it == mapOrphanTransactions.end()) {
         return 0;
     }
     for (const CTxIn &txin : it->second.tx->vin) {
         const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
         if (itPrev == mapOrphanTransactionsByPrev.end()) {
             continue;
         }
         itPrev->second.erase(it);
         if (itPrev->second.empty()) {
             mapOrphanTransactionsByPrev.erase(itPrev);
         }
     }
 
     size_t old_pos = it->second.list_pos;
     assert(g_orphan_list[old_pos] == it);
     if (old_pos + 1 != g_orphan_list.size()) {
         // Unless we're deleting the last entry in g_orphan_list, move the last
         // entry to the position we're deleting.
         auto it_last = g_orphan_list.back();
         g_orphan_list[old_pos] = it_last;
         it_last->second.list_pos = old_pos;
     }
     g_orphan_list.pop_back();
 
     mapOrphanTransactions.erase(it);
     return 1;
 }
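
 // EraseOrphanTx above deletes from g_orphan_list by moving the last element
 // into the vacated slot and then popping the back, which erases in O(1) when
 // element order does not matter. A minimal stand-alone sketch of the same
 // idiom (illustrative only, not used by this file):
 //
 //     std::vector<int> v{10, 20, 30, 40};
 //     size_t pos = 1;     // erase v[1]; ordering is irrelevant
 //     v[pos] = v.back();  // v == {10, 40, 30, 40}
 //     v.pop_back();       // v == {10, 40, 30}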
 
 void EraseOrphansFor(NodeId peer) {
     LOCK(g_cs_orphans);
     int nErased = 0;
     auto iter = mapOrphanTransactions.begin();
     while (iter != mapOrphanTransactions.end()) {
         // Increment to avoid iterator becoming invalid.
         const auto maybeErase = iter++;
         if (maybeErase->second.fromPeer == peer) {
             nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
         }
     }
     if (nErased > 0) {
         LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased,
                  peer);
     }
 }
 
 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) {
     LOCK(g_cs_orphans);
 
     unsigned int nEvicted = 0;
     static int64_t nNextSweep;
     int64_t nNow = GetTime();
     if (nNextSweep <= nNow) {
         // Sweep out expired orphan pool entries:
         int nErased = 0;
         int64_t nMinExpTime =
             nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
         auto iter = mapOrphanTransactions.begin();
         while (iter != mapOrphanTransactions.end()) {
             const auto maybeErase = iter++;
             if (maybeErase->second.nTimeExpire <= nNow) {
                 nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
             } else {
                 nMinExpTime =
                     std::min(maybeErase->second.nTimeExpire, nMinExpTime);
             }
         }
         // Sweep again 5 minutes after the next entry that expires in order to
         // batch the linear scan.
         nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
         if (nErased > 0) {
             LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n",
                      nErased);
         }
     }
     FastRandomContext rng;
     while (mapOrphanTransactions.size() > nMaxOrphans) {
         // Evict a random orphan:
         size_t randompos = rng.randrange(g_orphan_list.size());
         EraseOrphanTx(g_orphan_list[randompos]->first);
         ++nEvicted;
     }
     return nEvicted;
 }
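
 // A call site for LimitOrphanTxSize elsewhere in the transaction handling
 // code typically looks roughly like the following (sketch only; the variable
 // names are illustrative):
 //
 //     unsigned int nMaxOrphanTx = (unsigned int)std::max(
 //         int64_t(0),
 //         gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
 //     unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);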
 
 /**
  * Mark a misbehaving peer to be banned depending upon the value of `-banscore`.
  */
 void Misbehaving(NodeId pnode, int howmuch, const std::string &reason)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (howmuch == 0) {
         return;
     }
 
     CNodeState *state = State(pnode);
     if (state == nullptr) {
         return;
     }
 
     state->nMisbehavior += howmuch;
     int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
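     // Flag the peer for discouragement only at the moment its score first
     // crosses the -banscore threshold, not on every increment above it.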
     if (state->nMisbehavior >= banscore &&
         state->nMisbehavior - howmuch < banscore) {
         LogPrintf(
             "%s: %s peer=%d (%d -> %d) reason: %s BAN THRESHOLD EXCEEDED\n",
             __func__, state->name, pnode, state->nMisbehavior - howmuch,
             state->nMisbehavior, reason);
         state->m_should_discourage = true;
     } else {
         LogPrintf("%s: %s peer=%d (%d -> %d) reason: %s\n", __func__,
                   state->name, pnode, state->nMisbehavior - howmuch,
                   state->nMisbehavior, reason);
     }
 }
 
 // overloaded variant of above to operate on CNode*s
 static void Misbehaving(CNode *node, int howmuch, const std::string &reason)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     Misbehaving(node->GetId(), howmuch, reason);
 }
 
 /**
  * Returns true if the given validation state result may result in a peer
  * banning/disconnecting us. We use this to determine which unaccepted
  * transactions from a whitelisted peer we can safely relay.
  */
 static bool TxRelayMayResultInDisconnect(const TxValidationState &state) {
     return state.GetResult() == TxValidationResult::TX_CONSENSUS;
 }
 
 /**
  * Potentially ban a node based on the contents of a BlockValidationState object
  *
  * @param[in] via_compact_block: this bool is passed in because net_processing
  * should punish peers differently depending on whether the data was provided in
  * a compact block message or not. If the compact block had a valid header, but
  * contained invalid txs, the peer should not be punished. See BIP 152.
  *
  * @return Returns true if the peer was punished (probably disconnected)
  */
 static bool MaybePunishNodeForBlock(NodeId nodeid,
                                     const BlockValidationState &state,
                                     bool via_compact_block,
                                     const std::string &message = "") {
     switch (state.GetResult()) {
         case BlockValidationResult::BLOCK_RESULT_UNSET:
             break;
         // The node is providing invalid data:
         case BlockValidationResult::BLOCK_CONSENSUS:
         case BlockValidationResult::BLOCK_MUTATED:
             if (!via_compact_block) {
                 LOCK(cs_main);
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         case BlockValidationResult::BLOCK_CACHED_INVALID: {
             LOCK(cs_main);
             CNodeState *node_state = State(nodeid);
             if (node_state == nullptr) {
                 break;
             }
 
             // Ban outbound (but not inbound) peers if on an invalid chain.
             // Exempt HB compact block peers and manual connections.
             if (!via_compact_block && !node_state->m_is_inbound &&
                 !node_state->m_is_manual_connection) {
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         }
         case BlockValidationResult::BLOCK_INVALID_HEADER:
         case BlockValidationResult::BLOCK_CHECKPOINT:
         case BlockValidationResult::BLOCK_INVALID_PREV: {
             LOCK(cs_main);
             Misbehaving(nodeid, 100, message);
         }
             return true;
         case BlockValidationResult::BLOCK_FINALIZATION: {
             // TODO: Using the state object to report this is probably not
             // the best idea. This is effectively unreachable unless there is
             // a bug somewhere.
             LOCK(cs_main);
             Misbehaving(nodeid, 20, message);
         }
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case BlockValidationResult::BLOCK_MISSING_PREV: {
             // TODO: Handle this much more gracefully (10 DoS points is super
             // arbitrary)
             LOCK(cs_main);
             Misbehaving(nodeid, 10, message);
         }
             return true;
         case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
         case BlockValidationResult::BLOCK_TIME_FUTURE:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 /**
  * Potentially ban a node based on the contents of a TxValidationState object
  *
  * @return Returns true if the peer was punished (probably disconnected)
  *
  * Changes here may need to be reflected in TxRelayMayResultInDisconnect().
  */
 static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                                  const std::string &message = "") {
     switch (state.GetResult()) {
         case TxValidationResult::TX_RESULT_UNSET:
             break;
         // The node is providing invalid data:
         case TxValidationResult::TX_CONSENSUS: {
             LOCK(cs_main);
             Misbehaving(nodeid, 100, message);
             return true;
         }
         // Conflicting (but not necessarily invalid) data or different policy:
         case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
         case TxValidationResult::TX_NOT_STANDARD:
         case TxValidationResult::TX_MISSING_INPUTS:
         case TxValidationResult::TX_PREMATURE_SPEND:
         case TxValidationResult::TX_CONFLICT:
         case TxValidationResult::TX_MEMPOOL_POLICY:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // blockchain -> download logic notification
 //
 
 // To prevent fingerprinting attacks, only send blocks/headers outside of the
 // active chain if they are no more than a month older (both in time, and in
 // best equivalent proof of work) than the best header chain we know about and
 // we fully-validated them at some point.
 static bool BlockRequestAllowed(const CBlockIndex *pindex,
                                 const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     if (::ChainActive().Contains(pindex)) {
         return true;
     }
     return pindex->IsValid(BlockValidity::SCRIPTS) &&
            (pindexBestHeader != nullptr) &&
            (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() <
             STALE_RELAY_AGE_LIMIT) &&
            (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex,
                                         *pindexBestHeader, consensusParams) <
             STALE_RELAY_AGE_LIMIT);
 }
 
 PeerLogicValidation::PeerLogicValidation(CConnman *connmanIn, BanMan *banman,
                                          CScheduler &scheduler,
                                          bool enable_bip61)
     : connman(connmanIn), m_banman(banman), m_stale_tip_check_time(0),
       m_enable_bip61(enable_bip61) {
     // Initialize global variables that cannot be constructed at startup.
     recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
 
     const Consensus::Params &consensusParams = Params().GetConsensus();
     // Stale tip checking and peer eviction are on two different timers, but we
     // don't want them to get out of sync due to drift in the scheduler, so we
     // combine them in one function and schedule at the quicker (peer-eviction)
     // timer.
     static_assert(
         EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
         "peer eviction timer should be less than stale tip check timer");
     scheduler.scheduleEvery(
         [this, &consensusParams]() {
             this->CheckForStaleTipAndEvictPeers(consensusParams);
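             // Returning true keeps this task scheduled for the next
             // interval.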
             return true;
         },
         std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
 }
 
 /**
  * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
  * block. Also save the time of the last tip update.
  */
 void PeerLogicValidation::BlockConnected(
     const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
     const std::vector<CTransactionRef> &vtxConflicted) {
     LOCK(g_cs_orphans);
 
     std::vector<TxId> vOrphanErase;
 
     for (const CTransactionRef &ptx : pblock->vtx) {
         const CTransaction &tx = *ptx;
 
         // Which orphan pool entries must we evict?
         for (const auto &txin : tx.vin) {
             auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
             if (itByPrev == mapOrphanTransactionsByPrev.end()) {
                 continue;
             }
 
             for (auto mi = itByPrev->second.begin();
                  mi != itByPrev->second.end(); ++mi) {
                 const CTransaction &orphanTx = *(*mi)->second.tx;
                 const TxId &orphanId = orphanTx.GetId();
                 vOrphanErase.push_back(orphanId);
             }
         }
     }
 
     // Erase orphan transactions included or precluded by this block
     if (vOrphanErase.size()) {
         int nErased = 0;
         for (const auto &orphanId : vOrphanErase) {
             nErased += EraseOrphanTx(orphanId);
         }
         LogPrint(BCLog::MEMPOOL,
                  "Erased %d orphan tx included or conflicted by block\n",
                  nErased);
     }
 
     g_last_tip_update = GetTime();
 }
 
 // All of the following cache a recent block, and are protected by
 // cs_most_recent_block
 static RecursiveMutex cs_most_recent_block;
 static std::shared_ptr<const CBlock>
     most_recent_block GUARDED_BY(cs_most_recent_block);
 static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
     most_recent_compact_block GUARDED_BY(cs_most_recent_block);
 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
 
 /**
  * Maintain state about the best-seen block and fast-announce a compact block
  * to compatible peers.
  */
 void PeerLogicValidation::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
         std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
     const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
 
     LOCK(cs_main);
 
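     // Only fast-announce blocks that are higher than anything we have
     // fast-announced before; the static high-water mark prevents
     // re-announcing blocks during reorgs to the same or a lower height.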
     static int nHighestFastAnnounce = 0;
     if (pindex->nHeight <= nHighestFastAnnounce) {
         return;
     }
     nHighestFastAnnounce = pindex->nHeight;
 
     uint256 hashBlock(pblock->GetHash());
 
     {
         LOCK(cs_most_recent_block);
         most_recent_block_hash = hashBlock;
         most_recent_block = pblock;
         most_recent_compact_block = pcmpctblock;
     }
 
     connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker,
                           &hashBlock](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // TODO: Avoid the repeated-serialization here
         if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) {
             return;
         }
         ProcessBlockAvailability(pnode->GetId());
         CNodeState &state = *State(pnode->GetId());
         // If the peer has, or we announced to them the previous block already,
         // but we don't think they have this one, go ahead and announce it.
         if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
             PeerHasHeader(&state, pindex->pprev)) {
             LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n",
                      "PeerLogicValidation::NewPoWValidBlock",
                      hashBlock.ToString(), pnode->GetId());
             connman->PushMessage(
                 pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
             state.pindexBestHeaderSent = pindex;
         }
     });
 }
 
 /**
  * Update our best height and announce any block hashes which weren't previously
  * in ::ChainActive() to our peers.
  */
 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                           const CBlockIndex *pindexFork,
                                           bool fInitialDownload) {
     const int nNewHeight = pindexNew->nHeight;
     connman->SetBestHeight(nNewHeight);
 
     SetServiceFlagsIBDCache(!fInitialDownload);
     if (!fInitialDownload) {
         // Find the hashes of all blocks that weren't previously in the best
         // chain.
         std::vector<BlockHash> vHashes;
         const CBlockIndex *pindexToAnnounce = pindexNew;
         while (pindexToAnnounce != pindexFork) {
             vHashes.push_back(pindexToAnnounce->GetBlockHash());
             pindexToAnnounce = pindexToAnnounce->pprev;
             if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                 // Limit announcements in case of a huge reorganization. Rely on
                 // the peer's synchronization mechanism in that case.
                 break;
             }
         }
         // Relay inventory, but don't relay old inventory during initial block
         // download.
         connman->ForEachNode([nNewHeight, &vHashes](CNode *pnode) {
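             // Announce only to peers whose starting height is not far above
             // the new tip; a peer that started more than 2000 blocks ahead
             // of it almost certainly has these blocks already.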
             if (nNewHeight > (pnode->nStartingHeight != -1
                                   ? pnode->nStartingHeight - 2000
                                   : 0)) {
                 for (const BlockHash &hash : reverse_iterate(vHashes)) {
                     pnode->PushBlockHash(hash);
                 }
             }
         });
         connman->WakeMessageHandler();
     }
 }
 
 /**
  * Handle invalid block rejection and consequent peer banning, maintain which
  * peers announce compact blocks.
  */
 void PeerLogicValidation::BlockChecked(const CBlock &block,
                                        const BlockValidationState &state) {
     LOCK(cs_main);
 
     const BlockHash hash = block.GetHash();
     std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
         mapBlockSource.find(hash);
 
     if (state.IsInvalid()) {
         // Don't send reject message with code 0 or an internal reject code.
         if (it != mapBlockSource.end() && State(it->second.first) &&
             state.GetRejectCode() > 0 &&
             state.GetRejectCode() < REJECT_INTERNAL) {
             CBlockReject reject = {
                 uint8_t(state.GetRejectCode()),
                 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH),
                 hash};
             State(it->second.first)->rejects.push_back(reject);
             MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
                                     /*via_compact_block=*/!it->second.second);
         }
     }
     // Check that:
     // 1. The block is valid
     // 2. We're not in initial block download
     // 3. This is currently the best block we're aware of. We haven't updated
     //    the tip yet so we have no way to check this directly here. Instead we
     //    just check that there are currently no other blocks in flight.
     else if (state.IsValid() &&
              !::ChainstateActive().IsInitialBlockDownload() &&
              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
         if (it != mapBlockSource.end()) {
             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
         }
     }
 
     if (it != mapBlockSource.end()) {
         mapBlockSource.erase(it);
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Messages
 //
 
 static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     switch (inv.type) {
         case MSG_TX: {
             assert(recentRejects);
             if (::ChainActive().Tip()->GetBlockHash() !=
                 hashRecentRejectsChainTip) {
                 // If the chain tip has changed previously rejected transactions
                 // might be now valid, e.g. due to a nLockTime'd tx becoming
                 // valid, or a double-spend. Reset the rejects filter and give
                 // those txs a second chance.
                 hashRecentRejectsChainTip =
                     ::ChainActive().Tip()->GetBlockHash();
                 recentRejects->reset();
             }
 
             {
                 LOCK(g_cs_orphans);
                 if (mapOrphanTransactions.count(TxId{inv.hash})) {
                     return true;
                 }
             }
 
             // Use pcoinsTip->HaveCoinInCache as a quick approximation to
             // exclude requesting or processing some txs which have already been
             // included in a block. As this is best effort, we only check for
             // output 0 and 1. This works well enough in practice and we get
             // diminishing returns with 2 onward.
             const TxId txid(inv.hash);
             return recentRejects->contains(inv.hash) ||
                    g_mempool.exists(txid) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(txid, 0)) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(txid, 1));
         }
         case MSG_BLOCK:
             return LookupBlockIndex(BlockHash(inv.hash)) != nullptr;
     }
     // Don't know what it is, just say we already got one
     return true;
 }
 
 void RelayTransaction(const TxId &txid, const CConnman &connman) {
     CInv inv(MSG_TX, txid);
     connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); });
 }
 
 static void RelayAddress(const CAddress &addr, bool fReachable,
                          CConnman *connman) {
     // Limited relaying of addresses outside our network(s)
     unsigned int nRelayNodes = fReachable ? 2 : 1;
 
     // Relay to a limited number of other nodes.
     // Use deterministic randomness to send to the same nodes for 24 hours at a
     // time so the addrKnowns of the chosen nodes prevent repeats.
     uint64_t hashAddr = addr.GetHash();
     const CSipHasher hasher =
         connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
             .Write(hashAddr << 32)
             .Write((GetTime() + hashAddr) / (24 * 60 * 60));
     FastRandomContext insecure_rand;
 
     std::array<std::pair<uint64_t, CNode *>, 2> best{
         {{0, nullptr}, {0, nullptr}}};
     assert(nRelayNodes <= best.size());
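     // best[] holds the nRelayNodes candidates seen so far with the highest
     // keyed hashes, ordered from highest to lowest; a zero key marks an
     // unfilled slot.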
 
     auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) {
         if (pnode->nVersion >= CADDR_TIME_VERSION && pnode->IsAddrRelayPeer()) {
             uint64_t hashKey =
                 CSipHasher(hasher).Write(pnode->GetId()).Finalize();
             for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                               best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, pnode);
                     break;
                 }
             }
         }
     };
 
     auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
         for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
             best[i].second->PushAddress(addr, insecure_rand);
         }
     };
 
     connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
 }
 
 static void ProcessGetBlockData(const Config &config, CNode *pfrom,
                                 const CInv &inv, CConnman *connman,
                                 const std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     const BlockHash hash(inv.hash);
 
     bool send = false;
     std::shared_ptr<const CBlock> a_recent_block;
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
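     // Snapshot the most recently announced block (and its compact form)
     // under cs_most_recent_block so it can be served from memory below
     // without re-reading it from disk.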
     {
         LOCK(cs_most_recent_block);
         a_recent_block = most_recent_block;
         a_recent_compact_block = most_recent_compact_block;
     }
 
     bool need_activate_chain = false;
     {
         LOCK(cs_main);
         const CBlockIndex *pindex = LookupBlockIndex(hash);
         if (pindex) {
             if (pindex->HaveTxsDownloaded() &&
                 !pindex->IsValid(BlockValidity::SCRIPTS) &&
                 pindex->IsValid(BlockValidity::TREE)) {
                 // If we have the block and all of its parents, but have not yet
                 // validated it, we might be in the middle of connecting it (ie
                 // in the unlock of cs_main before ActivateBestChain but after
                 // AcceptBlock). In this case, we need to run ActivateBestChain
                 // prior to checking the relay conditions below.
                 need_activate_chain = true;
             }
         }
     } // release cs_main before calling ActivateBestChain
     if (need_activate_chain) {
         BlockValidationState state;
         if (!ActivateBestChain(config, state, a_recent_block)) {
             LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                      FormatStateMessage(state));
         }
     }
 
     LOCK(cs_main);
     const CBlockIndex *pindex = LookupBlockIndex(hash);
     if (pindex) {
         send = BlockRequestAllowed(pindex, consensusParams);
         if (!send) {
             LogPrint(BCLog::NET,
                      "%s: ignoring request from peer=%i for old "
                      "block that isn't in the main chain\n",
                      __func__, pfrom->GetId());
         }
     }
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     // Disconnect node in case we have reached the outbound limit for serving
     // historical blocks.
     // Never disconnect whitelisted nodes.
     if (send && connman->OutboundTargetReached(true) &&
         (((pindexBestHeader != nullptr) &&
           (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() >
            HISTORICAL_BLOCK_AGE)) ||
          inv.type == MSG_FILTERED_BLOCK) &&
         !pfrom->HasPermission(PF_NOBAN)) {
         LogPrint(BCLog::NET,
                  "historical block serving limit reached, disconnect peer=%d\n",
                  pfrom->GetId());
 
         // disconnect node
         pfrom->fDisconnect = true;
         send = false;
     }
     // Avoid leaking prune-height by never sending blocks below the
     // NODE_NETWORK_LIMITED threshold.
     // Add two blocks buffer extension for possible races
     if (send && !pfrom->HasPermission(PF_NOBAN) &&
         ((((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) ==
            NODE_NETWORK_LIMITED) &&
           ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
           (::ChainActive().Tip()->nHeight - pindex->nHeight >
            (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
         LogPrint(BCLog::NET,
                  "Ignore block request below NODE_NETWORK_LIMITED "
                  "threshold from peer=%d\n",
                  pfrom->GetId());
 
         // disconnect node and prevent it from stalling (would otherwise wait
         // for the missing block)
         pfrom->fDisconnect = true;
         send = false;
     }
     // Pruned nodes may have deleted the block, so check whether it's available
     // before trying to send.
     if (send && pindex->nStatus.hasData()) {
         std::shared_ptr<const CBlock> pblock;
         if (a_recent_block &&
             a_recent_block->GetHash() == pindex->GetBlockHash()) {
             pblock = a_recent_block;
         } else {
             // Send block from disk
             std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
             if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) {
                 assert(!"cannot load block from disk");
             }
             pblock = pblockRead;
         }
         if (inv.type == MSG_BLOCK) {
             connman->PushMessage(pfrom,
                                  msgMaker.Make(NetMsgType::BLOCK, *pblock));
         } else if (inv.type == MSG_FILTERED_BLOCK) {
             bool sendMerkleBlock = false;
             CMerkleBlock merkleBlock;
             if (pfrom->m_tx_relay != nullptr) {
                 LOCK(pfrom->m_tx_relay->cs_filter);
                 if (pfrom->m_tx_relay->pfilter) {
                     sendMerkleBlock = true;
                     merkleBlock =
                         CMerkleBlock(*pblock, *pfrom->m_tx_relay->pfilter);
                 }
             }
             if (sendMerkleBlock) {
                 connman->PushMessage(
                     pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                 // CMerkleBlock just contains hashes, so also push any
                 // transactions in the block the client did not see. This avoids
                 // hurting performance by pointlessly requiring a round-trip.
                 // Note that there is currently no way for a node to request any
                 // single transactions we didn't send here - they must either
                 // disconnect and retry or request the full block. Thus, the
                 // protocol spec allows us to provide duplicate txn here;
                 // however, we MUST always provide at least what the remote
                 // peer needs.
                 typedef std::pair<size_t, uint256> PairType;
                 for (PairType &pair : merkleBlock.vMatchedTxn) {
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::TX,
                                              *pblock->vtx[pair.first]));
                 }
             }
             // else
             // no response
         } else if (inv.type == MSG_CMPCT_BLOCK) {
             // If a peer is asking for old blocks, we're almost guaranteed they
             // won't have a useful mempool to match against a compact block, and
             // we don't feel like constructing the object for them, so instead
             // we respond with the full, non-compact block.
             int nSendFlags = 0;
             if (CanDirectFetch(consensusParams) &&
                 pindex->nHeight >=
                     ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
                 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
                 connman->PushMessage(
                     pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                          cmpctblock));
             } else {
                 connman->PushMessage(
                     pfrom,
                     msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
             }
         }
 
         // Trigger the peer node to send a getblocks request for the next batch
         // of inventory.
         if (hash == pfrom->hashContinue) {
             // Bypass PushInventory: this must send even if redundant, and we
             // want it right after the last block so they don't wait for other
             // stuff first.
             std::vector<CInv> vInv;
             vInv.push_back(
                 CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
             pfrom->hashContinue = BlockHash();
         }
     }
 }
 
 static void ProcessGetData(const Config &config, CNode *pfrom,
                            CConnman *connman,
                            const std::atomic<bool> &interruptMsgProc)
     LOCKS_EXCLUDED(cs_main) {
     AssertLockNotHeld(cs_main);
 
     std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
 
     // Note that if we receive a getdata for a MSG_TX or MSG_WITNESS_TX from a
     // block-relay-only outbound peer, we will stop processing further getdata
     // messages from this peer (likely resulting in our peer eventually
     // disconnecting us).
     if (pfrom->m_tx_relay != nullptr) {
         // mempool entries added before this time have likely expired from
         // mapRelay
         const std::chrono::seconds longlived_mempool_time =
             GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME;
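         // Time of this peer's last "mempool" request, if any (zero when the
         // peer has never sent one); used below to decide whether answering
         // from the mempool is privacy-safe.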
         const std::chrono::seconds mempool_req =
             pfrom->m_tx_relay->m_last_mempool_req.load();
 
         LOCK(cs_main);
 
         while (it != pfrom->vRecvGetData.end() && it->type == MSG_TX) {
             if (interruptMsgProc) {
                 return;
             }
             // Don't bother if send buffer is too full to respond anyway.
             if (pfrom->fPauseSend) {
                 break;
             }
 
             const CInv &inv = *it;
             it++;
 
             // Send stream from relay memory
             bool push = false;
             auto mi = mapRelay.find(inv.hash);
             int nSendFlags = 0;
             if (mi != mapRelay.end()) {
                 connman->PushMessage(
                     pfrom,
                     msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                 push = true;
             } else {
                 auto txinfo = g_mempool.info(TxId(inv.hash));
                 // To protect privacy, do not answer getdata using the mempool
                 // when that TX couldn't have been INVed in reply to a MEMPOOL
                 // request, or when it's too recent to have expired from
                 // mapRelay.
                 if (txinfo.tx &&
                     ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
                      (txinfo.m_time <= longlived_mempool_time))) {
                     connman->PushMessage(
                         pfrom,
                         msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                     push = true;
                 }
             }
             if (!push) {
                 vNotFound.push_back(inv);
             }
         }
     } // release cs_main
 
     if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
         const CInv &inv = *it;
         if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
             inv.type == MSG_CMPCT_BLOCK) {
             it++;
             ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc);
         }
     }
 
     // Unknown types in the GetData stay in vRecvGetData and block any future
     // message from this peer, see vRecvGetData check in ProcessMessages().
     // Depending on future p2p changes, we might either drop unknown getdata on
     // the floor or disconnect the peer.
 
     pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it
         // doesn't have to wait around forever. SPV clients care about this
         // message: it's needed when they are recursively walking the
         // dependencies of relevant unconfirmed transactions. SPV clients want
         // to do that because they want to know about (and store and rebroadcast
         // and risk analyze) the dependencies of transactions relevant to them,
         // without having to download the entire memory pool. Also, other nodes
         // can use these messages to automatically request a transaction from
         // some other peer that announced it, and stop waiting for us to
         // respond. In normal operation, we often send NOTFOUND messages for
         // parents of transactions that we relay; if a peer is missing a parent,
         // they may assume we have them and request the parents from us.
         connman->PushMessage(pfrom,
                              msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
 
 inline static void SendBlockTransactions(const CBlock &block,
                                          const BlockTransactionsRequest &req,
                                          CNode *pfrom, CConnman *connman) {
     BlockTransactions resp(req);
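     // Copy each requested transaction into the response, treating any
     // out-of-range index as misbehavior by the requesting peer.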
     for (size_t i = 0; i < req.indices.size(); i++) {
         if (req.indices[i] >= block.vtx.size()) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "out-of-bound-tx-index");
             LogPrintf(
                 "Peer %d sent us a getblocktxn with out-of-bounds tx indices\n",
                 pfrom->GetId());
             return;
         }
         resp.txn[i] = block.vtx[req.indices[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     int nSendFlags = 0;
     connman->PushMessage(pfrom,
                          msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
 static bool ProcessHeadersMessage(const Config &config, CNode *pfrom,
                                   CConnman *connman,
                                   const std::vector<CBlockHeader> &headers,
                                   bool via_compact_block) {
     const CChainParams &chainparams = config.GetChainParams();
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     size_t nCount = headers.size();
 
     if (nCount == 0) {
         // Nothing interesting. Stop asking this peer for more headers.
         return true;
     }
 
     bool received_new_header = false;
     const CBlockIndex *pindexLast = nullptr;
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom->GetId());
 
         // If this looks like it could be a block announcement (nCount <
         // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
         // don't connect:
         // - Send a getheaders message in response to try to connect the chain.
         // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
         // don't connect before giving DoS points
         // - Once a headers message is received that is valid and does connect,
         // nUnconnectingHeaders gets reset back to 0.
         if (!LookupBlockIndex(headers[0].hashPrevBlock) &&
             nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
             connman->PushMessage(
                 pfrom,
                 msgMaker.Make(NetMsgType::GETHEADERS,
                               ::ChainActive().GetLocator(pindexBestHeader),
                               uint256()));
             LogPrint(
                 BCLog::NET,
                 "received header %s: missing prev block %s, sending getheaders "
                 "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                 headers[0].GetHash().ToString(),
                 headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight,
                 pfrom->GetId(), nodestate->nUnconnectingHeaders);
             // Set hashLastUnknownBlock for this peer, so that if we eventually
             // get the headers - even from a different peer - we can use this
             // peer to download.
             UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
 
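             // Assign DoS points on every MAX_UNCONNECTING_HEADERS-th
             // unconnecting headers message, so a peer that keeps sending
             // headers we cannot connect is eventually discouraged.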
             if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
                 0) {
                 // The peer is sending us many headers we can't connect.
                 Misbehaving(pfrom, 20, "too-many-unconnected-headers");
             }
             return true;
         }
 
         BlockHash hashLastBlock;
         for (const CBlockHeader &header : headers) {
             if (!hashLastBlock.IsNull() &&
                 header.hashPrevBlock != hashLastBlock) {
                 Misbehaving(pfrom, 20, "disconnected-header");
                 return error("non-continuous headers sequence");
             }
             hashLastBlock = header.GetHash();
         }
 
         // If we don't have the last header, then they'll have given us
         // something new (if these headers are valid).
         if (!LookupBlockIndex(hashLastBlock)) {
             received_new_header = true;
         }
     }
 
     BlockValidationState state;
     if (!ProcessNewBlockHeaders(config, headers, state, &pindexLast)) {
         if (state.IsInvalid()) {
             MaybePunishNodeForBlock(pfrom->GetId(), state, via_compact_block,
                                     "invalid header received");
             return false;
         }
     }
 
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom->GetId());
         if (nodestate->nUnconnectingHeaders > 0) {
             LogPrint(BCLog::NET,
                      "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
                      pfrom->GetId(), nodestate->nUnconnectingHeaders);
         }
         nodestate->nUnconnectingHeaders = 0;
 
         assert(pindexLast);
         UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
 
         // From here, pindexBestKnownBlock should be guaranteed to be non-null,
         // because it is set in UpdateBlockAvailability. Some nullptr checks are
         // still present, however, as belt-and-suspenders.
 
         if (received_new_header &&
             pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
             nodestate->m_last_block_announcement = GetTime();
         }
 
         if (nCount == MAX_HEADERS_RESULTS) {
             // Headers message had its maximum size; the peer may have more
             // headers.
             // TODO: optimize: if pindexLast is an ancestor of
             // ::ChainActive().Tip() or pindexBestHeader, continue from there
             // instead.
             LogPrint(
                 BCLog::NET,
                 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                 pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
             connman->PushMessage(
                 pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
                                      ::ChainActive().GetLocator(pindexLast),
                                      uint256()));
         }
 
         bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
         // If this set of headers is valid and ends in a block with at least as
         // much work as our tip, download as much as possible.
         if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) &&
             ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
             std::vector<const CBlockIndex *> vToFetch;
             const CBlockIndex *pindexWalk = pindexLast;
             // Calculate all the blocks we'd need to switch to pindexLast, up to
             // a limit.
             while (pindexWalk && !::ChainActive().Contains(pindexWalk) &&
                    vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                 if (!pindexWalk->nStatus.hasData() &&
                     !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
                     // We don't have this block, and it's not yet in flight.
                     vToFetch.push_back(pindexWalk);
                 }
                 pindexWalk = pindexWalk->pprev;
             }
             // If pindexWalk still isn't on our main chain, we're looking at a
             // very large reorg at a time we think we're close to caught up to
             // the main chain -- this shouldn't really happen. Bail out on the
             // direct fetch and rely on parallel download instead.
             if (!::ChainActive().Contains(pindexWalk)) {
                 LogPrint(
                     BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
             } else {
                 std::vector<CInv> vGetData;
                 // Download as much as possible, from earliest to latest.
                 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                     if (nodestate->nBlocksInFlight >=
                         MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                         // Can't download any more from this peer
                         break;
                     }
                     vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                     MarkBlockAsInFlight(config, pfrom->GetId(),
                                         pindex->GetBlockHash(),
                                         chainparams.GetConsensus(), pindex);
                     LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                              pindex->GetBlockHash().ToString(), pfrom->GetId());
                 }
                 if (vGetData.size() > 1) {
                     LogPrint(BCLog::NET,
                              "Downloading blocks toward %s (%d) via headers "
                              "direct fetch\n",
                              pindexLast->GetBlockHash().ToString(),
                              pindexLast->nHeight);
                 }
                 if (vGetData.size() > 0) {
                     if (nodestate->fSupportsDesiredCmpctVersion &&
                         vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
                         pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
                         // In any case, we want to download using a compact
                         // block, not a regular one.
                         vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                     }
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                 }
             }
         }
         // If we're in IBD, we want outbound peers that will serve us a useful
         // chain. Disconnect peers that are on chains with insufficient work.
         if (::ChainstateActive().IsInitialBlockDownload() &&
             nCount != MAX_HEADERS_RESULTS) {
             // When nCount < MAX_HEADERS_RESULTS, we know we have no more
             // headers to fetch from this peer.
             if (nodestate->pindexBestKnownBlock &&
                 nodestate->pindexBestKnownBlock->nChainWork <
                     nMinimumChainWork) {
                 // This peer has too little work on their headers chain to help
                 // us sync -- disconnect if using an outbound slot (unless
                 // whitelisted or addnode).
                 // Note: We compare their tip to nMinimumChainWork (rather than
                 // ::ChainActive().Tip()) because we won't start block download
                 // until we have a headers chain that has at least
                 // nMinimumChainWork, even if a peer has a chain past our tip,
                 // as an anti-DoS measure.
                 if (IsOutboundDisconnectionCandidate(pfrom)) {
                     LogPrintf("Disconnecting outbound peer %d -- headers "
                               "chain has insufficient work\n",
                               pfrom->GetId());
                     pfrom->fDisconnect = true;
                 }
             }
         }
 
         if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) &&
             nodestate->pindexBestKnownBlock != nullptr &&
             pfrom->m_tx_relay != nullptr) {
             // If this is an outbound full-relay peer, check to see if we should
             // protect it from the bad/lagging chain logic. Note that
             // block-relay-only peers are already implicitly protected, so we
             // only consider setting m_protect for the full-relay peers.
             if (g_outbound_peers_with_protect_from_disconnect <
                     MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
                 nodestate->pindexBestKnownBlock->nChainWork >=
                     ::ChainActive().Tip()->nChainWork &&
                 !nodestate->m_chain_sync.m_protect) {
                 LogPrint(BCLog::NET,
                          "Protecting outbound peer=%d from eviction\n",
                          pfrom->GetId());
                 nodestate->m_chain_sync.m_protect = true;
                 ++g_outbound_peers_with_protect_from_disconnect;
             }
         }
     }
 
     return true;
 }
 
 void static ProcessOrphanTx(const Config &config, CConnman *connman,
                             std::set<TxId> &orphan_work_set)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) {
     AssertLockHeld(cs_main);
     AssertLockHeld(g_cs_orphans);
     std::unordered_map<NodeId, uint32_t> rejectCountPerNode;
     bool done = false;
     while (!done && !orphan_work_set.empty()) {
         const TxId orphanTxId = *orphan_work_set.begin();
         orphan_work_set.erase(orphan_work_set.begin());
 
         auto orphan_it = mapOrphanTransactions.find(orphanTxId);
         if (orphan_it == mapOrphanTransactions.end()) {
             continue;
         }
 
         const CTransactionRef porphanTx = orphan_it->second.tx;
         const CTransaction &orphanTx = *porphanTx;
         NodeId fromPeer = orphan_it->second.fromPeer;
         // Use a new TxValidationState because orphans come from different peers
         // (and we call MaybePunishNodeForTx based on the source peer from the
         // orphan map, not based on the peer that relayed the previous
         // transaction).
         TxValidationState orphan_state;
 
         auto it = rejectCountPerNode.find(fromPeer);
         if (it != rejectCountPerNode.end() &&
             it->second > MAX_NON_STANDARD_ORPHAN_PER_NODE) {
             continue;
         }
 
         if (AcceptToMemoryPool(config, g_mempool, orphan_state, porphanTx,
                                false /* bypass_limits */,
                                Amount::zero() /* nAbsurdFee */)) {
             LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n",
                      orphanTxId.ToString());
             RelayTransaction(orphanTxId, *connman);
             for (size_t i = 0; i < orphanTx.vout.size(); i++) {
                 auto it_by_prev =
                     mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i));
                 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                     for (const auto &elem : it_by_prev->second) {
                         orphan_work_set.insert(elem->first);
                     }
                 }
             }
             EraseOrphanTx(orphanTxId);
             done = true;
         } else if (orphan_state.GetResult() !=
                    TxValidationResult::TX_MISSING_INPUTS) {
             if (orphan_state.IsInvalid()) {
                 // Punish peer that gave us an invalid orphan tx
                 MaybePunishNodeForTx(fromPeer, orphan_state);
                 LogPrint(BCLog::MEMPOOL, "   invalid orphan tx %s\n",
                          orphanTxId.ToString());
             }
             // Has inputs but not accepted to mempool
             // Probably non-standard or insufficient fee
             LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n",
                      orphanTxId.ToString());
 
             assert(recentRejects);
             recentRejects->insert(orphanTxId);
 
             EraseOrphanTx(orphanTxId);
             done = true;
         }
         g_mempool.check(pcoinsTip.get());
     }
 }
 
 static bool ProcessMessage(const Config &config, CNode *pfrom,
                            const std::string &strCommand, CDataStream &vRecv,
                            int64_t nTimeReceived, CConnman *connman,
                            BanMan *banman,
                            const std::atomic<bool> &interruptMsgProc,
                            bool enable_bip61) {
     const CChainParams &chainparams = config.GetChainParams();
     LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
              SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
     if (gArgs.IsArgSet("-dropmessagestest") &&
         GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) {
         LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
         return true;
     }
 
     if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
         (strCommand == NetMsgType::FILTERLOAD ||
          strCommand == NetMsgType::FILTERADD)) {
         if (pfrom->nVersion >= NO_BLOOM_VERSION) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "no-bloom-version");
             return false;
         } else {
             pfrom->fDisconnect = true;
             return false;
         }
     }
 
     if (strCommand == NetMsgType::REJECT) {
         if (LogAcceptCategory(BCLog::NET)) {
             try {
                 std::string strMsg;
                 uint8_t ccode;
                 std::string strReason;
                 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >>
                     ccode >>
                     LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
 
                 std::ostringstream ss;
                 ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
 
                 if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
                     uint256 hash;
                     vRecv >> hash;
                     ss << ": hash " << hash.ToString();
                 }
                 LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
             } catch (const std::ios_base::failure &) {
                 // Avoid feedback loops by preventing reject messages from
                 // triggering a new reject message.
                 LogPrint(BCLog::NET, "Unparseable reject message received\n");
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::VERSION) {
         // Each connection can only send one version message
         if (pfrom->nVersion != 0) {
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE,
                               std::string("Duplicate version message")));
             }
             LOCK(cs_main);
             Misbehaving(pfrom, 1, "multiple-version");
             return false;
         }
 
         int64_t nTime;
         CAddress addrMe;
         CAddress addrFrom;
         uint64_t nNonce = 1;
         uint64_t nServiceInt;
         ServiceFlags nServices;
         int nVersion;
         int nSendVersion;
         std::string cleanSubVer;
         int nStartingHeight = -1;
         bool fRelay = true;
 
         vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
         nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
         nServices = ServiceFlags(nServiceInt);
         if (!pfrom->fInbound) {
             connman->SetServices(pfrom->addr, nServices);
         }
         if (!pfrom->fInbound && !pfrom->fFeeler &&
             !pfrom->m_manual_connection &&
             !HasAllDesirableServiceFlags(nServices)) {
             LogPrint(BCLog::NET,
                      "peer=%d does not offer the expected services "
                      "(%08x offered, %08x expected); disconnecting\n",
                      pfrom->GetId(), nServices,
                      GetDesirableServiceFlags(nServices));
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand,
                               REJECT_NONSTANDARD,
                               strprintf("Expected to offer services %08x",
                                         GetDesirableServiceFlags(nServices))));
             }
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (nVersion < MIN_PEER_PROTO_VERSION) {
             // disconnect from peers older than this proto version
             LogPrint(BCLog::NET,
                      "peer=%d using obsolete version %i; disconnecting\n",
                      pfrom->GetId(), nVersion);
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater",
                                         MIN_PEER_PROTO_VERSION)));
             }
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (!vRecv.empty()) {
             vRecv >> addrFrom >> nNonce;
         }
         if (!vRecv.empty()) {
             std::string strSubVer;
             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
             cleanSubVer = SanitizeString(strSubVer);
         }
         if (!vRecv.empty()) {
             vRecv >> nStartingHeight;
         }
         if (!vRecv.empty()) {
             vRecv >> fRelay;
         }
         // Disconnect if we connected to ourself
         if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce)) {
             LogPrintf("connected to self at %s, disconnecting\n",
                       pfrom->addr.ToString());
             pfrom->fDisconnect = true;
             return true;
         }
 
         if (pfrom->fInbound && addrMe.IsRoutable()) {
             SeenLocal(addrMe);
         }
 
         // Be shy and don't send version until we hear
         if (pfrom->fInbound) {
             PushNodeVersion(config, pfrom, connman, GetAdjustedTime());
         }
 
         connman->PushMessage(
             pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
 
         pfrom->nServices = nServices;
         pfrom->SetAddrLocal(addrMe);
         {
             LOCK(pfrom->cs_SubVer);
             pfrom->cleanSubVer = cleanSubVer;
         }
         pfrom->nStartingHeight = nStartingHeight;
 
         // set nodes not relaying blocks and tx and not serving (parts) of the
         // historical blockchain as "clients"
         pfrom->fClient = (!(nServices & NODE_NETWORK) &&
                           !(nServices & NODE_NETWORK_LIMITED));
 
         // set nodes not capable of serving the complete blockchain history as
         // "limited nodes"
         pfrom->m_limited_node =
             (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
 
         if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
             // when fRelay is false, fRelayTxes is only set to true after we
             // get the first filter* message
             pfrom->m_tx_relay->fRelayTxes = fRelay;
         }
 
         // Change version
         pfrom->SetSendVersion(nSendVersion);
         pfrom->nVersion = nVersion;
 
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
             UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
         }
 
         if (!pfrom->fInbound && pfrom->IsAddrRelayPeer()) {
             // Advertise our address
             if (fListen && !::ChainstateActive().IsInitialBlockDownload()) {
                 CAddress addr =
                     GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                 FastRandomContext insecure_rand;
                 if (addr.IsRoutable()) {
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 } else if (IsPeerAddrLocalGood(pfrom)) {
                     addr.SetIP(addrMe);
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 }
             }
 
             // Get recent addresses
             if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION ||
                 connman->GetAddressCount() < 1000) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
                 pfrom->fGetAddr = true;
             }
             connman->MarkAddressGood(pfrom->addr);
         }
 
         std::string remoteAddr;
         if (fLogIPs) {
             remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
         }
 
         LogPrint(BCLog::NET,
                  "receive version message: [%s] %s: version %d, blocks=%d, "
                  "us=%s, peer=%d%s\n",
                  pfrom->addr.ToString(), cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
                  remoteAddr);
 
         // Ignore time offsets that are improbable (before the Genesis block)
         // and may underflow the nTimeOffset calculation.
         int64_t currentTime = GetTime();
         if (nTime >= int64_t(chainparams.GenesisBlock().nTime)) {
             int64_t nTimeOffset = nTime - currentTime;
             pfrom->nTimeOffset = nTimeOffset;
             AddTimeData(pfrom->addr, nTimeOffset);
         } else {
             LOCK(cs_main);
             Misbehaving(pfrom, 20,
                         "Ignoring invalid timestamp in version message");
         }
 
         // Feeler connections exist only to verify whether an address is online.
         if (pfrom->fFeeler) {
             assert(pfrom->fInbound == false);
             pfrom->fDisconnect = true;
         }
         return true;
     }
 
     if (pfrom->nVersion == 0) {
         // Must have a version message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 10, "missing-version");
         return false;
     }
 
     // At this point, the outgoing message serialization version can't change.
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
 
     if (strCommand == NetMsgType::VERACK) {
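         // From now on, receive messages using the negotiated protocol
         // version (capped at our own PROTOCOL_VERSION).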
         pfrom->SetRecvVersion(
             std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
 
         if (!pfrom->fInbound) {
             // Mark this node as currently connected, so we update its timestamp
             // later.
             LOCK(cs_main);
             State(pfrom->GetId())->fCurrentlyConnected = true;
             LogPrintf(
                 "New outbound peer connected: version: %d, blocks=%d, "
                 "peer=%d%s (%s)\n",
                 pfrom->nVersion.load(), pfrom->nStartingHeight, pfrom->GetId(),
                 (fLogIPs ? strprintf(", peeraddr=%s", pfrom->addr.ToString())
                          : ""),
                 pfrom->m_tx_relay == nullptr ? "block-relay" : "full-relay");
         }
 
         if (pfrom->nVersion >= SENDHEADERS_VERSION) {
             // Tell our peer we prefer to receive headers rather than inv's
             // We send this to non-NODE NETWORK peers as well, because even
             // non-NODE NETWORK peers can announce blocks (such as pruning
             // nodes)
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
         }
         if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
             // Tell our peer we are willing to provide version 1 or 2
             // cmpctblocks. However, we do not request new block announcements
             // using cmpctblock messages. We send this to non-NODE NETWORK peers
             // as well, because they may wish to request compact blocks from us.
             bool fAnnounceUsingCMPCTBLOCK = false;
             uint64_t nCMPCTBLOCKVersion = 1;
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT,
                                                       fAnnounceUsingCMPCTBLOCK,
                                                       nCMPCTBLOCKVersion));
         }
         pfrom->fSuccessfullyConnected = true;
         return true;
     }
 
     if (!pfrom->fSuccessfullyConnected) {
         // Must have a verack message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 10, "missing-verack");
         return false;
     }
 
     if (strCommand == NetMsgType::ADDR) {
         std::vector<CAddress> vAddr;
         vRecv >> vAddr;
 
         // Don't want addr from older versions unless seeding
         if (pfrom->nVersion < CADDR_TIME_VERSION &&
             connman->GetAddressCount() > 1000) {
             return true;
         }
         if (!pfrom->IsAddrRelayPeer()) {
             return true;
         }
         if (vAddr.size() > 1000) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-addr");
             return error("message addr size() = %u", vAddr.size());
         }
 
         // Store the new addresses
         std::vector<CAddress> vAddrOk;
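         // Addresses seen within the last 10 minutes (nSince) are considered
         // fresh enough to be relayed to other peers below.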
         int64_t nNow = GetAdjustedTime();
         int64_t nSince = nNow - 10 * 60;
         for (CAddress &addr : vAddr) {
             if (interruptMsgProc) {
                 return true;
             }
 
             // We only bother storing full nodes, though this may include things
             // which we would not make an outbound connection to, in part
             // because we may make feeler connections to them.
             if (!MayHaveUsefulAddressDB(addr.nServices) &&
                 !HasAllDesirableServiceFlags(addr.nServices)) {
                 continue;
             }
 
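             // Implausible timestamps (at or below the 100000000 sentinel, or
             // more than 10 minutes in the future) are reset to five days ago.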
             if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
                 addr.nTime = nNow - 5 * 24 * 60 * 60;
             }
             pfrom->AddAddressKnown(addr);
             // Do not process banned/discouraged addresses beyond remembering we
             // received them
             if (banman->IsDiscouraged(addr)) {
                 continue;
             }
             if (banman->IsBanned(addr)) {
                 continue;
             }
             bool fReachable = IsReachable(addr);
             if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 &&
                 addr.IsRoutable()) {
                 // Relay to a limited number of other nodes
                 RelayAddress(addr, fReachable, connman);
             }
             // Do not store addresses outside our network
             if (fReachable) {
                 vAddrOk.push_back(addr);
             }
         }
 
         connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
         if (vAddr.size() < 1000) {
             pfrom->fGetAddr = false;
         }
         if (pfrom->fOneShot) {
             pfrom->fDisconnect = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::SENDHEADERS) {
         LOCK(cs_main);
         State(pfrom->GetId())->fPreferHeaders = true;
         return true;
     }
 
     if (strCommand == NetMsgType::SENDCMPCT) {
         bool fAnnounceUsingCMPCTBLOCK = false;
         uint64_t nCMPCTBLOCKVersion = 0;
         vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
         if (nCMPCTBLOCKVersion == 1) {
             LOCK(cs_main);
             // fProvidesHeaderAndIDs is used to "lock in" version of compact
             // blocks we send.
             if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                 State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
             }
 
             State(pfrom->GetId())->fPreferHeaderAndIDs =
                 fAnnounceUsingCMPCTBLOCK;
             if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                 State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true;
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::INV) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-inv");
             return error("message inv size() = %u", vInv.size());
         }
 
         // We won't accept tx inv's if we're in blocks-only mode, or this is a
         // block-relay-only peer
         bool fBlocksOnly = !g_relay_txes || (pfrom->m_tx_relay == nullptr);
 
         // Allow whitelisted peers to send data other than blocks in blocks only
         // mode if whitelistrelay is true
         if (pfrom->HasPermission(PF_RELAY)) {
             fBlocksOnly = false;
         }
 
         LOCK(cs_main);
 
         const auto current_time = GetTime<std::chrono::microseconds>();
 
         for (CInv &inv : vInv) {
             if (interruptMsgProc) {
                 return true;
             }
 
             bool fAlreadyHave = AlreadyHave(inv);
             LogPrint(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(),
                      fAlreadyHave ? "have" : "new", pfrom->GetId());
 
             if (inv.type == MSG_BLOCK) {
                 const BlockHash hash(inv.hash);
                 UpdateBlockAvailability(pfrom->GetId(), hash);
                 if (!fAlreadyHave && !fImporting && !fReindex &&
                     !mapBlocksInFlight.count(hash)) {
                     // We used to request the full block here, but since
                     // headers-announcements are now the primary method of
                     // announcement on the network, and since, in the case that
                     // a node fell back to inv we probably have a reorg which we
                     // should get the headers for first, we now only provide a
                     // getheaders response here. When we receive the headers, we
                     // will then ask for the blocks we need.
                     connman->PushMessage(
                         pfrom, msgMaker.Make(
                                    NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexBestHeader),
                                    hash));
                     LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                              pindexBestHeader->nHeight, hash.ToString(),
                              pfrom->GetId());
                 }
             } else {
                 pfrom->AddInventoryKnown(inv);
                 if (fBlocksOnly) {
                     LogPrint(BCLog::NET,
                              "transaction (%s) inv sent in violation of "
                              "protocol, disconnecting peer=%d\n",
                              inv.hash.ToString(), pfrom->GetId());
                     pfrom->fDisconnect = true;
                     return true;
                 } else if (!fAlreadyHave && !fImporting && !fReindex &&
                            !::ChainstateActive().IsInitialBlockDownload()) {
                     RequestTx(State(pfrom->GetId()), TxId(inv.hash),
                               current_time);
                 }
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::GETDATA) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-inv");
             return error("message getdata size() = %u", vInv.size());
         }
 
         LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
                  vInv.size(), pfrom->GetId());
 
         if (vInv.size() > 0) {
             LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
                      vInv[0].ToString(), pfrom->GetId());
         }
 
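         // Queue the requests; ProcessGetData services as many as it can now
         // and leaves the rest in vRecvGetData for later passes.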
         pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(),
                                    vInv.end());
         ProcessGetData(config, pfrom, connman, interruptMsgProc);
         return true;
     }
 
     if (strCommand == NetMsgType::GETBLOCKS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getblocks locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         // We might have announced the currently-being-connected tip using a
         // compact block, which resulted in the peer sending a getblocks
         // request, which we would otherwise respond to without the new block.
         // To avoid this situation we simply verify that we are on our best
         // known chain now. This is super overkill, but we handle it better
         // for getheaders requests, and there are no known nodes which support
         // compact blocks but still use getblocks to request blocks.
         {
             std::shared_ptr<const CBlock> a_recent_block;
             {
                 LOCK(cs_most_recent_block);
                 a_recent_block = most_recent_block;
             }
             BlockValidationState state;
             if (!ActivateBestChain(config, state, a_recent_block)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          FormatStateMessage(state));
             }
         }
 
         LOCK(cs_main);
 
         // Find the last block the caller has in the main chain
         const CBlockIndex *pindex =
             FindForkInGlobalIndex(::ChainActive(), locator);
 
         // Send the rest of the chain
         if (pindex) {
             pindex = ::ChainActive().Next(pindex);
         }
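         // Respond with at most 500 block invs; if the limit is hit,
         // hashContinue (set below) lets us trigger the peer's next getblocks.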
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
                  pfrom->GetId());
         for (; pindex; pindex = ::ChainActive().Next(pindex)) {
             if (pindex->GetBlockHash() == hashStop) {
                 LogPrint(BCLog::NET, "  getblocks stopping at %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             // If pruning, don't inv blocks unless we have them on disk and
             // are likely to still have them for some reasonable time window
             // (1 hour) that block relay might require.
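             // (e.g. with mainnet parameters, MIN_BLOCKS_TO_KEEP = 288 and a
             // 10-minute target spacing, this is 288 - 3600/600 = 282 blocks)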
             const int nPrunedBlocksLikelyToHave =
                 MIN_BLOCKS_TO_KEEP -
                 3600 / chainparams.GetConsensus().nPowTargetSpacing;
             if (fPruneMode &&
                 (!pindex->nStatus.hasData() ||
                  pindex->nHeight <= ::ChainActive().Tip()->nHeight -
                                         nPrunedBlocksLikelyToHave)) {
                 LogPrint(
                     BCLog::NET,
                     " getblocks stopping, pruned or too old block at %d %s\n",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
             if (--nLimit <= 0) {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
                 LogPrint(BCLog::NET, "  getblocks stopping at limit %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 pfrom->hashContinue = pindex->GetBlockHash();
                 break;
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::GETBLOCKTXN) {
         BlockTransactionsRequest req;
         vRecv >> req;
 
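         // Fast path: if the request is for the block we most recently
         // relayed, serve it from the cached copy without taking cs_main or
         // hitting the disk.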
         std::shared_ptr<const CBlock> recent_block;
         {
             LOCK(cs_most_recent_block);
             if (most_recent_block_hash == req.blockhash) {
                 recent_block = most_recent_block;
             }
             // Unlock cs_most_recent_block to avoid cs_main lock inversion
         }
         if (recent_block) {
             SendBlockTransactions(*recent_block, req, pfrom, connman);
             return true;
         }
 
         LOCK(cs_main);
 
         const CBlockIndex *pindex = LookupBlockIndex(req.blockhash);
         if (!pindex || !pindex->nStatus.hasData()) {
             LogPrint(
                 BCLog::NET,
                 "Peer %d sent us a getblocktxn for a block we don't have\n",
                 pfrom->GetId());
             return true;
         }
 
         if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
             // If an older block is requested (should never happen in practice,
             // but can happen in tests) send a block response instead of a
             // blocktxn response. Sending a full block response instead of a
             // small blocktxn response is preferable in the case where a peer
             // might maliciously send lots of getblocktxn requests to trigger
             // expensive disk reads, because it will require the peer to
             // actually receive all the data read from disk over the network.
             LogPrint(BCLog::NET,
                      "Peer %d sent us a getblocktxn for a block > %i deep\n",
                      pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
             CInv inv;
             inv.type = MSG_BLOCK;
             inv.hash = req.blockhash;
             pfrom->vRecvGetData.push_back(inv);
             // The message processing loop will go around again (without
             // pausing) and we'll respond then (without cs_main)
             return true;
         }
 
         CBlock block;
         bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
         assert(ret);
 
         SendBlockTransactions(block, req, pfrom, connman);
         return true;
     }
 
     if (strCommand == NetMsgType::GETHEADERS) {
         CBlockLocator locator;
         BlockHash hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getheaders locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         LOCK(cs_main);
         if (::ChainstateActive().IsInitialBlockDownload() &&
             !pfrom->HasPermission(PF_NOBAN)) {
             LogPrint(BCLog::NET,
                      "Ignoring getheaders from peer=%d because node is in "
                      "initial block download\n",
                      pfrom->GetId());
             return true;
         }
 
         CNodeState *nodestate = State(pfrom->GetId());
         const CBlockIndex *pindex = nullptr;
         if (locator.IsNull()) {
             // If locator is null, return the hashStop block
             pindex = LookupBlockIndex(hashStop);
             if (!pindex) {
                 return true;
             }
 
             if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
                 LogPrint(BCLog::NET,
                          "%s: ignoring request from peer=%i for old block "
                          "header that isn't in the main chain\n",
                          __func__, pfrom->GetId());
                 return true;
             }
         } else {
             // Find the last block the caller has in the main chain
             pindex = FindForkInGlobalIndex(::ChainActive(), locator);
             if (pindex) {
                 pindex = ::ChainActive().Next(pindex);
             }
         }
 
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
         // count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(),
                  pfrom->GetId());
         for (; pindex; pindex = ::ChainActive().Next(pindex)) {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
                 break;
             }
         }
         // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
         // if our peer has ::ChainActive().Tip() (and thus we are sending an
         // empty headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
         // It is important that we simply reset the BestHeaderSent value here,
         // and not max(BestHeaderSent, newHeaderSent). We might have announced
         // the currently-being-connected tip using a compact block, which
         // resulted in the peer sending a headers request, which we respond to
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent =
             pindex ? pindex : ::ChainActive().Tip();
         connman->PushMessage(pfrom,
                              msgMaker.Make(NetMsgType::HEADERS, vHeaders));
         return true;
     }
 
     if (strCommand == NetMsgType::TX) {
         // Stop processing the transaction early if we are in blocks-only
         // mode and the peer is not whitelisted (or whitelistrelay is off),
         // or if this peer is supposed to be a block-relay-only peer.
         if ((!g_relay_txes && !pfrom->HasPermission(PF_RELAY)) ||
             (pfrom->m_tx_relay == nullptr)) {
             LogPrint(BCLog::NET,
                      "transaction sent in violation of protocol peer=%d\n",
                      pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         CTransactionRef ptx;
         vRecv >> ptx;
         const CTransaction &tx = *ptx;
         const TxId &txid = tx.GetId();
 
         CInv inv(MSG_TX, txid);
         pfrom->AddInventoryKnown(inv);
 
         LOCK2(cs_main, g_cs_orphans);
 
         TxValidationState state;
 
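         // Whatever the outcome, stop tracking this txid as announced or in
         // flight so it is not re-requested from this peer.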
         CNodeState *nodestate = State(pfrom->GetId());
         nodestate->m_tx_download.m_tx_announced.erase(txid);
         nodestate->m_tx_download.m_tx_in_flight.erase(txid);
         EraseTxRequest(txid);
 
         if (!AlreadyHave(inv) &&
             AcceptToMemoryPool(config, g_mempool, state, ptx,
                                false /* bypass_limits */,
                                Amount::zero() /* nAbsurdFee */)) {
             g_mempool.check(pcoinsTip.get());
             RelayTransaction(tx.GetId(), *connman);
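             // Collect any orphan transactions that spend outputs of this tx;
             // they are reprocessed via ProcessOrphanTx below.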
             for (size_t i = 0; i < tx.vout.size(); i++) {
                 auto it_by_prev =
                     mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
                 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                     for (const auto &elem : it_by_prev->second) {
                         pfrom->orphan_work_set.insert(elem->first);
                     }
                 }
             }
 
             pfrom->nLastTXTime = GetTime();
 
             LogPrint(BCLog::MEMPOOL,
                      "AcceptToMemoryPool: peer=%d: accepted %s "
                      "(poolsz %u txn, %u kB)\n",
                      pfrom->GetId(), tx.GetId().ToString(), g_mempool.size(),
                      g_mempool.DynamicMemoryUsage() / 1000);
 
             // Recursively process any orphan transactions that depended on this
             // one
             ProcessOrphanTx(config, connman, pfrom->orphan_work_set);
         } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
             // It may be the case that the orphan's parents have all been
             // rejected.
             bool fRejectedParents = false;
             for (const CTxIn &txin : tx.vin) {
                 if (recentRejects->contains(txin.prevout.GetTxId())) {
                     fRejectedParents = true;
                     break;
                 }
             }
             if (!fRejectedParents) {
                 const auto current_time = GetTime<std::chrono::microseconds>();
 
                 for (const CTxIn &txin : tx.vin) {
                     // FIXME: MSG_TX should use a TxHash, not a TxId.
                     const TxId _txid = txin.prevout.GetTxId();
                     CInv _inv(MSG_TX, _txid);
                     pfrom->AddInventoryKnown(_inv);
                     if (!AlreadyHave(_inv)) {
                         RequestTx(State(pfrom->GetId()), _txid, current_time);
                     }
                 }
                 AddOrphanTx(ptx, pfrom->GetId());
 
                 // DoS prevention: do not allow mapOrphanTransactions to grow
                 // unbounded
                 unsigned int nMaxOrphanTx = (unsigned int)std::max(
                     int64_t(0), gArgs.GetArg("-maxorphantx",
                                              DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                 if (nEvicted > 0) {
                     LogPrint(BCLog::MEMPOOL,
                              "mapOrphan overflow, removed %u tx\n", nEvicted);
                 }
             } else {
                 LogPrint(BCLog::MEMPOOL,
                          "not keeping orphan with rejected parents %s\n",
                          tx.GetId().ToString());
                 // We will continue to reject this tx since it has rejected
                 // parents so avoid re-requesting it from other peers.
                 recentRejects->insert(tx.GetId());
             }
         } else {
             assert(recentRejects);
             recentRejects->insert(tx.GetId());
 
             if (RecursiveDynamicUsage(*ptx) < 100000) {
                 AddToCompactExtraTransactions(ptx);
             }
 
             if (pfrom->HasPermission(PF_FORCERELAY)) {
                 // Always relay transactions received from whitelisted peers,
                 // even if they were already in the mempool or rejected from it
                 // due to policy, allowing the node to function as a gateway for
                 // nodes hidden behind it.
                 //
                 // Never relay transactions that might result in being
                 // disconnected (or banned).
                 if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
                     LogPrintf("Not relaying invalid transaction %s from "
                               "whitelisted peer=%d (%s)\n",
                               tx.GetId().ToString(), pfrom->GetId(),
                               FormatStateMessage(state));
                 } else {
                     LogPrintf("Force relaying tx %s from whitelisted peer=%d\n",
                               tx.GetId().ToString(), pfrom->GetId());
                     RelayTransaction(tx.GetId(), *connman);
                 }
             }
         }
 
         // If a tx has been detected by recentRejects, we will have reached
         // this point and the tx will have been ignored. Because we haven't run
         // the tx through AcceptToMemoryPool, we won't have computed a DoS
         // score for it or determined exactly why we consider it invalid.
         //
         // This means we won't penalize any peer subsequently relaying a DoSy
         // tx (even if we penalized the first peer who gave it to us) because
         // we have to account for recentRejects showing false positives. In
         // other words, we shouldn't penalize a peer if we aren't *sure* they
         // submitted a DoSy tx.
         //
         // Note that recentRejects doesn't just record DoSy or invalid
         // transactions, but any tx not accepted by the mempool, which may be
         // due to node policy (vs. consensus). So we can't blanket penalize a
         // peer simply for relaying a tx that our recentRejects has caught,
         // regardless of false positives.
 
         if (state.IsInvalid()) {
             LogPrint(BCLog::MEMPOOLREJ,
                      "%s from peer=%d was not accepted: %s\n",
                      tx.GetHash().ToString(), pfrom->GetId(),
                      FormatStateMessage(state));
             // Never send AcceptToMemoryPool's internal codes over P2P
             if (enable_bip61 && state.GetRejectCode() > 0 &&
                 state.GetRejectCode() < REJECT_INTERNAL) {
                 connman->PushMessage(
                     pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand,
                                          uint8_t(state.GetRejectCode()),
                                          state.GetRejectReason().substr(
                                              0, MAX_REJECT_MESSAGE_LENGTH),
                                          inv.hash));
             }
             MaybePunishNodeForTx(pfrom->GetId(), state);
         }
         return true;
     }
 
     if (strCommand == NetMsgType::CMPCTBLOCK) {
         // Ignore cmpctblock received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected cmpctblock message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         CBlockHeaderAndShortTxIDs cmpctblock;
         vRecv >> cmpctblock;
 
         bool received_new_header = false;
 
         {
             LOCK(cs_main);
 
             if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
                 // Doesn't connect (or is genesis); instead of DoSing in
                 // AcceptBlockHeader, request deeper headers.
                 if (!::ChainstateActive().IsInitialBlockDownload()) {
                     connman->PushMessage(
                         pfrom, msgMaker.Make(
                                    NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexBestHeader),
                                    uint256()));
                 }
                 return true;
             }
 
             if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
                 received_new_header = true;
             }
         }
 
         const CBlockIndex *pindex = nullptr;
         BlockValidationState state;
         if (!ProcessNewBlockHeaders(config, {cmpctblock.header}, state,
                                     &pindex)) {
             if (state.IsInvalid()) {
                 MaybePunishNodeForBlock(pfrom->GetId(), state,
                                         /*via_compact_block*/ true,
                                         "invalid header via cmpctblock");
                 return true;
             }
         }
 
         // When we succeed in decoding a block's txids from a cmpctblock
         // message we typically jump to the BLOCKTXN handling code, with a
         // dummy (empty) BLOCKTXN message, to re-use the logic there in
         // completing processing of the putative block (without cs_main).
         bool fProcessBLOCKTXN = false;
         CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
 
         // If we end up treating this as a plain headers message, call that
         // as well without cs_main.
         bool fRevertToHeaderProcessing = false;
 
         // Keep a CBlock for "optimistic" compactblock reconstructions (see
         // below)
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockReconstructed = false;
 
         {
             LOCK2(cs_main, g_cs_orphans);
             // If AcceptBlockHeader returned true, it set pindex
             assert(pindex);
             UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
 
             CNodeState *nodestate = State(pfrom->GetId());
 
             // If this was a new header with more work than our tip, update the
             // peer's last block announcement time
             if (received_new_header &&
                 pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
                 nodestate->m_last_block_announcement = GetTime();
             }
 
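             // Check whether this block is already being downloaded, and if
             // so, from which peer.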
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator blockInFlightIt =
                     mapBlocksInFlight.find(pindex->GetBlockHash());
             bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
 
             if (pindex->nStatus.hasData()) {
                 // Nothing to do here
                 return true;
             }
 
             if (pindex->nChainWork <=
                     ::ChainActive()
                         .Tip()
                         ->nChainWork || // We know something better
                 pindex->nTx != 0) {
                 // We had this block at some point, but pruned it
                 if (fAlreadyInFlight) {
                     // We requested this block for some reason, but our mempool
                     // will probably be useless so we just grab the block via
                     // normal getdata.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                 }
                 return true;
             }
 
             // If we're not close to tip yet, give up and let parallel block
             // fetch work its magic.
             if (!fAlreadyInFlight &&
                 !CanDirectFetch(chainparams.GetConsensus())) {
                 return true;
             }
 
             // We want to be a bit conservative just to be extra careful about
             // DoS possibilities in compact block processing...
             if (pindex->nHeight <= ::ChainActive().Height() + 2) {
                 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
                                               MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     (fAlreadyInFlight &&
                      blockInFlightIt->second.first == pfrom->GetId())) {
                     std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
                     if (!MarkBlockAsInFlight(config, pfrom->GetId(),
                                              pindex->GetBlockHash(),
                                              chainparams.GetConsensus(), pindex,
                                              &queuedBlockIt)) {
                         if (!(*queuedBlockIt)->partialBlock) {
                             (*queuedBlockIt)
                                 ->partialBlock.reset(
                                     new PartiallyDownloadedBlock(config,
                                                                  &g_mempool));
                         } else {
                             // The block was already in flight using compact
                             // blocks from the same peer.
                             LogPrint(BCLog::NET, "Peer sent us compact block "
                                                  "we were already syncing!\n");
                             return true;
                         }
                     }
 
                     PartiallyDownloadedBlock &partialBlock =
                         *(*queuedBlockIt)->partialBlock;
                     ReadStatus status =
                         partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status == READ_STATUS_INVALID) {
                         // Reset in-flight state in case of whitelist
                         MarkBlockAsReceived(pindex->GetBlockHash());
                         Misbehaving(pfrom, 100, "invalid-cmpctblk");
                         LogPrintf("Peer %d sent us invalid compact block\n",
                                   pfrom->GetId());
                         return true;
                     } else if (status == READ_STATUS_FAILED) {
                         // Duplicate txindices, the block is now in-flight, so
                         // just request it.
                         std::vector<CInv> vInv(1);
                         vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                         connman->PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                         return true;
                     }
 
                     BlockTransactionsRequest req;
                     for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                         if (!partialBlock.IsTxAvailable(i)) {
                             req.indices.push_back(i);
                         }
                     }
                     if (req.indices.empty()) {
                         // Dirty hack to jump to BLOCKTXN code (TODO: move
                         // message handling into their own functions)
                         BlockTransactions txn;
                         txn.blockhash = cmpctblock.header.GetHash();
                         blockTxnMsg << txn;
                         fProcessBLOCKTXN = true;
                     } else {
                         req.blockhash = pindex->GetBlockHash();
                         connman->PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                     }
                 } else {
                     // This block is either already in flight from a different
                     // peer, or this peer has too many blocks outstanding to
                     // download from. Optimistically try to reconstruct anyway
                     // since we might be able to without any round trips.
                     PartiallyDownloadedBlock tempBlock(config, &g_mempool);
                     ReadStatus status =
                         tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status != READ_STATUS_OK) {
                         // TODO: don't ignore failures
                         return true;
                     }
                     std::vector<CTransactionRef> dummy;
                     status = tempBlock.FillBlock(*pblock, dummy);
                     if (status == READ_STATUS_OK) {
                         fBlockReconstructed = true;
                     }
                 }
             } else {
                 if (fAlreadyInFlight) {
                     // We requested this block, but it's far into the future,
                     // so our mempool will probably be useless - request the
                     // block normally.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                     return true;
                 } else {
                     // If this was an announce-cmpctblock, we want the same
                     // treatment as a header message.
                     fRevertToHeaderProcessing = true;
                 }
             }
         } // cs_main
 
         if (fProcessBLOCKTXN) {
             return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
                                   blockTxnMsg, nTimeReceived, connman, banman,
                                   interruptMsgProc, enable_bip61);
         }
 
         if (fRevertToHeaderProcessing) {
             // Headers received from HB compact block peers are permitted to be
             // relayed before full validation (see BIP 152), so we don't want to
             // disconnect the peer if the header turns out to be for an invalid
             // block. Note that if a peer tries to build on an invalid chain,
             // that will be detected and the peer will be banned.
             return ProcessHeadersMessage(config, pfrom, connman,
                                          {cmpctblock.header},
                                          /*via_compact_block=*/true);
         }
 
         if (fBlockReconstructed) {
             // If we got here, we were able to optimistically reconstruct a
             // block that is in flight from some other peer.
             {
                 LOCK(cs_main);
                 mapBlockSource.emplace(pblock->GetHash(),
                                        std::make_pair(pfrom->GetId(), false));
             }
             bool fNewBlock = false;
             // Setting fForceProcessing to true means that we bypass some of
             // our anti-DoS protections in AcceptBlock, which filters
             // unrequested blocks that might be trying to waste our resources
             // (eg disk space). Because we only try to reconstruct blocks when
             // we're close to caught up (via the CanDirectFetch() requirement
             // above, combined with the behavior of not requesting blocks until
             // we have a chain with at least nMinimumChainWork), and we ignore
             // compact blocks with less work than our tip, it is safe to treat
             // reconstructed compact blocks as having been requested.
             ProcessNewBlock(config, pblock, /*fForceProcessing=*/true,
                             &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             } else {
                 LOCK(cs_main);
                 mapBlockSource.erase(pblock->GetHash());
             }
 
             // hold cs_main for CBlockIndex::IsValid()
             LOCK(cs_main);
             if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
                 // Clear download state for this block, which is in process
                 // from some other peer. We do this after calling
                 // ProcessNewBlock so that a malleated cmpctblock announcement
                 // can't be used to interfere with block relay.
                 MarkBlockAsReceived(pblock->GetHash());
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::BLOCKTXN) {
         // Ignore blocktxn received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected blocktxn message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         BlockTransactions resp;
         vRecv >> resp;
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockRead = false;
         {
             LOCK(cs_main);
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator it = mapBlocksInFlight.find(resp.blockhash);
             if (it == mapBlocksInFlight.end() ||
                 !it->second.second->partialBlock ||
                 it->second.first != pfrom->GetId()) {
                 LogPrint(BCLog::NET,
                          "Peer %d sent us block transactions for block "
                          "we weren't expecting\n",
                          pfrom->GetId());
                 return true;
             }
 
             PartiallyDownloadedBlock &partialBlock =
                 *it->second.second->partialBlock;
             ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
             if (status == READ_STATUS_INVALID) {
                 // Reset in-flight state in case of whitelist.
                 MarkBlockAsReceived(resp.blockhash);
                 Misbehaving(pfrom, 100, "invalid-cmpctblk-txns");
                 LogPrintf("Peer %d sent us invalid compact block/non-matching "
                           "block transactions\n",
                           pfrom->GetId());
                 return true;
             } else if (status == READ_STATUS_FAILED) {
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
                 connman->PushMessage(pfrom,
                                      msgMaker.Make(NetMsgType::GETDATA, invs));
             } else {
                 // Block is either okay, or possibly we received
                 // READ_STATUS_CHECKBLOCK_FAILED.
                 // Note that CheckBlock can only fail for one of a few reasons:
                 // 1. bad-proof-of-work (impossible here, because we've already
                 //    accepted the header)
                 // 2. merkleroot doesn't match the transactions given (already
                 //    caught in FillBlock with READ_STATUS_FAILED, so
                 //    impossible here)
                 // 3. the block is otherwise invalid (eg invalid coinbase,
                 //    block is too big, too many legacy sigops, etc).
                 // So if CheckBlock failed, #3 is the only possibility.
                 // Under BIP 152, we don't DoS-ban unless proof of work is
                 // invalid (we don't require all the stateless checks to have
                 // been run). This is handled below, so just treat this as
                 // though the block was successfully read, and rely on the
                 // handling in ProcessNewBlock to ensure the block index is
                 // updated, reject messages go out, etc.
 
                 // MarkBlockAsReceived erases the in-flight entry, so
                 // partialBlock must not be used past this point.
                 MarkBlockAsReceived(resp.blockhash);
                 fBlockRead = true;
                 // mapBlockSource is only used for sending reject messages and
                 // DoS scores, so the race between here and cs_main in
                 // ProcessNewBlock is fine. BIP 152 permits peers to relay
                 // compact blocks after validating the header only; we should
                 // not punish peers if the block turns out to be invalid.
                 mapBlockSource.emplace(resp.blockhash,
                                        std::make_pair(pfrom->GetId(), false));
             }
         } // Don't hold cs_main when we call into ProcessNewBlock
         if (fBlockRead) {
             bool fNewBlock = false;
             // Since we requested this block (it was in mapBlocksInFlight),
             // force it to be processed, even if it would not be a candidate for
             // new tip (missing previous block, chain not long enough, etc)
             // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
             // disk-space attacks), but this should be safe due to the
             // protections in the compact block handler -- see related comment
             // in compact block optimistic reconstruction handling.
             ProcessNewBlock(config, pblock, /*fForceProcessing=*/true,
                             &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             } else {
                 LOCK(cs_main);
                 mapBlockSource.erase(pblock->GetHash());
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::HEADERS) {
         // Ignore headers received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected headers message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk
         // deserializing 2000 full blocks.
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > MAX_HEADERS_RESULTS) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-headers");
             return error("headers message size = %u", nCount);
         }
         headers.resize(nCount);
         for (unsigned int n = 0; n < nCount; n++) {
             vRecv >> headers[n];
             // Ignore tx count; assume it is 0.
             ReadCompactSize(vRecv);
         }
 
         return ProcessHeadersMessage(config, pfrom, connman, headers,
                                      /*via_compact_block=*/false);
     }
 
     if (strCommand == NetMsgType::BLOCK) {
         // Ignore block received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected block message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;
 
         LogPrint(BCLog::NET, "received block %s peer=%d\n",
                  pblock->GetHash().ToString(), pfrom->GetId());
 
         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network. Such an unrequested
         // block may still be processed, subject to the conditions in
         // AcceptBlock().
         bool forceProcessing = pfrom->HasPermission(PF_NOBAN) &&
                                !::ChainstateActive().IsInitialBlockDownload();
         const BlockHash hash = pblock->GetHash();
         {
             LOCK(cs_main);
             // Also always process if we requested the block explicitly, as we
             // may need it even though it is not a candidate for a new best tip.
             forceProcessing |= MarkBlockAsReceived(hash);
             // mapBlockSource is only used for sending reject messages and DoS
             // scores, so the race between here and cs_main in ProcessNewBlock
             // is fine.
             mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
         }
         bool fNewBlock = false;
         ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
         if (fNewBlock) {
             pfrom->nLastBlockTime = GetTime();
         } else {
             LOCK(cs_main);
             mapBlockSource.erase(hash);
         }
         return true;
     }
 
     // Ignore avalanche requests while importing
     if (strCommand == NetMsgType::AVAPOLL && !fImporting && !fReindex &&
         g_avalanche &&
         gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
         auto now = std::chrono::steady_clock::now();
         int64_t cooldown =
             gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
 
         {
             LOCK(cs_main);
             auto &node_state = State(pfrom->GetId())->m_avalanche_state;
 
             if (now <
                 node_state.last_poll + std::chrono::milliseconds(cooldown)) {
                 Misbehaving(pfrom, 20, "avapoll-cooldown");
             }
 
             node_state.last_poll = now;
         }
 
         uint64_t round;
         Unserialize(vRecv, round);
 
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-ava-poll");
             return error("poll message size = %u", nCount);
         }
 
         std::vector<avalanche::Vote> votes;
         votes.reserve(nCount);
 
         LogPrint(BCLog::NET, "received avalanche poll from peer=%d\n",
                  pfrom->GetId());
 
         {
             LOCK(cs_main);
 
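             // Vote 0 if the block is on our active chain, 1 if we know the
             // block but it is not on the active chain, and -1 (cast to
             // uint32_t) if we do not know the block at all.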
             for (unsigned int n = 0; n < nCount; n++) {
                 CInv inv;
                 vRecv >> inv;
 
                 uint32_t error = -1;
                 if (inv.type == MSG_BLOCK) {
                     auto blockIndex = LookupBlockIndex(BlockHash(inv.hash));
                     if (blockIndex) {
                         error = ::ChainActive().Contains(blockIndex) ? 0 : 1;
                     }
                 }
 
                 votes.emplace_back(error, inv.hash);
             }
         }
 
         // Send the response to the node.
         g_avalanche->sendResponse(
             pfrom, avalanche::Response(round, cooldown, std::move(votes)));
         return true;
     }
 
     // Ignore avalanche requests while importing
     if (strCommand == NetMsgType::AVARESPONSE && !fImporting && !fReindex &&
         g_avalanche &&
         gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
         // As long as QUIC is not implemented, we need to sign responses and
         // verify their signatures in order to avoid any manipulation of
         // messages at the transport level.
         CHashVerifier<CDataStream> verifier(&vRecv);
         avalanche::Response response;
         verifier >> response;
 
         if (!g_avalanche->forNode(
                 pfrom->GetId(), [&](const avalanche::Node &n) {
                     std::array<uint8_t, 64> sig;
                     vRecv >> sig;
 
                     // Unfortunately, the verify API requires a vector.
                     std::vector<uint8_t> vchSig{sig.begin(), sig.end()};
                     return n.pubkey.VerifySchnorr(verifier.GetHash(), vchSig);
                 })) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "invalid-ava-response-signature");
             return true;
         }
 
         std::vector<avalanche::BlockUpdate> updates;
         if (!g_avalanche->registerVotes(pfrom->GetId(), response, updates)) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "invalid-ava-response-content");
             return true;
         }
 
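         // Apply the vote outcomes: park blocks that avalanche rejected or
         // found invalid, unpark accepted/finalized ones, then re-run
         // ActivateBestChain to pick the new best tip.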
         if (updates.size()) {
             for (avalanche::BlockUpdate &u : updates) {
                 CBlockIndex *pindex = u.getBlockIndex();
                 switch (u.getStatus()) {
                     case avalanche::BlockUpdate::Status::Invalid:
                     case avalanche::BlockUpdate::Status::Rejected: {
                         BlockValidationState state;
                         ::ChainstateActive().ParkBlock(config, state, pindex);
                         if (!state.IsValid()) {
                             return error("Database error: %s",
                                          state.GetRejectReason());
                         }
                     } break;
                     case avalanche::BlockUpdate::Status::Accepted:
                     case avalanche::BlockUpdate::Status::Finalized: {
                         LOCK(cs_main);
                         UnparkBlock(pindex);
                     } break;
                 }
             }
 
             BlockValidationState state;
             if (!ActivateBestChain(config, state)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          FormatStateMessage(state));
             }
         }
 
         return true;
     }
 
     if (strCommand == NetMsgType::GETADDR) {
         // This asymmetric behavior for inbound and outbound connections was
         // introduced to prevent a fingerprinting attack: an attacker can send
         // specific fake addresses to users' AddrMan and later request them by
         // sending getaddr messages. Making nodes which are behind NAT and can
         // only make outgoing connections ignore the getaddr message mitigates
         // the attack.
         if (!pfrom->fInbound) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from outbound connection. peer=%d\n",
                      pfrom->GetId());
             return true;
         }
         if (!pfrom->IsAddrRelayPeer()) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from block-relay-only connection. "
                      "peer=%d\n",
                      pfrom->GetId());
             return true;
         }
 
         // Only send one GetAddr response per connection to reduce resource
         // waste and discourage addr stamping of INV announcements.
         if (pfrom->fSentAddr) {
             LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
                      pfrom->GetId());
             return true;
         }
         pfrom->fSentAddr = true;
 
         pfrom->vAddrToSend.clear();
         std::vector<CAddress> vAddr = connman->GetAddresses();
         FastRandomContext insecure_rand;
         for (const CAddress &addr : vAddr) {
             if (!banman->IsDiscouraged(addr) && !banman->IsBanned(addr)) {
                 pfrom->PushAddress(addr, insecure_rand);
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::MEMPOOL) {
         if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
             !pfrom->HasPermission(PF_MEMPOOL)) {
             if (!pfrom->HasPermission(PF_NOBAN)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bloom filters disabled, "
                          "disconnect peer=%d\n",
                          pfrom->GetId());
                 pfrom->fDisconnect = true;
             }
             return true;
         }
 
         if (connman->OutboundTargetReached(false) &&
             !pfrom->HasPermission(PF_MEMPOOL)) {
             if (!pfrom->HasPermission(PF_NOBAN)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bandwidth limit reached, "
                          "disconnect peer=%d\n",
                          pfrom->GetId());
                 pfrom->fDisconnect = true;
             }
             return true;
         }
 
         if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_tx_inventory);
             pfrom->m_tx_relay->fSendMempool = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::PING) {
         if (pfrom->nVersion > BIP0031_VERSION) {
             uint64_t nonce = 0;
             vRecv >> nonce;
             // Echo the message back with the nonce. This allows for two useful
             // features:
             //
             // 1) A remote node can quickly check if the connection is
             // operational.
             // 2) Remote nodes can measure the latency of the network thread. If
             // this node is overloaded it won't respond to pings quickly and the
             // remote node can avoid sending us more work, like chain download
             // requests.
             //
             // The nonce stops the remote getting confused between different
             // pings: without it, if the remote node sends a ping once per
             // second and this node takes 5 seconds to respond to each, the 5th
             // ping the remote sends would appear to return very quickly.
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
         }
         return true;
     }
 
     if (strCommand == NetMsgType::PONG) {
         int64_t pingUsecEnd = nTimeReceived;
         uint64_t nonce = 0;
         size_t nAvail = vRecv.in_avail();
         bool bPingFinished = false;
         std::string sProblem;
 
         if (nAvail >= sizeof(nonce)) {
             vRecv >> nonce;
 
             // Only process pong message if there is an outstanding ping (old
             // ping without nonce should never pong)
             if (pfrom->nPingNonceSent != 0) {
                 if (nonce == pfrom->nPingNonceSent) {
                     // Matching pong received, this ping is no longer
                     // outstanding
                     bPingFinished = true;
                     int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                     if (pingUsecTime > 0) {
                         // Successful ping time measurement, replace previous
                         pfrom->nPingUsecTime = pingUsecTime;
                         pfrom->nMinPingUsecTime = std::min(
                             pfrom->nMinPingUsecTime.load(), pingUsecTime);
                     } else {
                         // This should never happen
                         sProblem = "Timing mishap";
                     }
                 } else {
                     // Nonce mismatches are normal when pings are overlapping
                     sProblem = "Nonce mismatch";
                     if (nonce == 0) {
                         // This is most likely a bug in another implementation
                         // somewhere; cancel this ping
                         bPingFinished = true;
                         sProblem = "Nonce zero";
                     }
                 }
             } else {
                 sProblem = "Unsolicited pong without ping";
             }
         } else {
             // This is most likely a bug in another implementation somewhere;
             // cancel this ping
             bPingFinished = true;
             sProblem = "Short payload";
         }
 
         if (!(sProblem.empty())) {
             LogPrint(BCLog::NET,
                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                      pfrom->GetId(), sProblem, pfrom->nPingNonceSent, nonce,
                      nAvail);
         }
         if (bPingFinished) {
             pfrom->nPingNonceSent = 0;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERLOAD) {
         CBloomFilter filter;
         vRecv >> filter;
 
         if (!filter.IsWithinSizeConstraints()) {
             // There is no excuse for sending a too-large filter
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "oversized-bloom-filter");
         } else if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
             pfrom->m_tx_relay->pfilter.reset(new CBloomFilter(filter));
             pfrom->m_tx_relay->pfilter->UpdateEmptyFull();
             pfrom->m_tx_relay->fRelayTxes = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERADD) {
         std::vector<uint8_t> vData;
         vRecv >> vData;
 
         // Nodes must NEVER send a data item > 520 bytes (the max size for a
         // script data object, and thus, the maximum size any matched object can
         // have) in a filteradd message.
         bool bad = false;
         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
             bad = true;
         } else if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
             if (pfrom->m_tx_relay->pfilter) {
                 pfrom->m_tx_relay->pfilter->insert(vData);
             } else {
                 bad = true;
             }
         }
         if (bad) {
             LOCK(cs_main);
             // The structure of this code doesn't really allow for a good error
             // code. We'll go generic.
             Misbehaving(pfrom, 100, "invalid-filteradd");
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERCLEAR) {
         if (pfrom->m_tx_relay == nullptr) {
             return true;
         }
         LOCK(pfrom->m_tx_relay->cs_filter);
         if (pfrom->GetLocalServices() & NODE_BLOOM) {
             pfrom->m_tx_relay->pfilter.reset(new CBloomFilter());
         }
         pfrom->m_tx_relay->fRelayTxes = true;
         return true;
     }
 
     if (strCommand == NetMsgType::FEEFILTER) {
         Amount newFeeFilter = Amount::zero();
         vRecv >> newFeeFilter;
         if (MoneyRange(newFeeFilter)) {
             if (pfrom->m_tx_relay != nullptr) {
                 LOCK(pfrom->m_tx_relay->cs_feeFilter);
                 pfrom->m_tx_relay->minFeeFilter = newFeeFilter;
             }
             LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
                      CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
         }
         return true;
     }
 
     if (strCommand == NetMsgType::NOTFOUND) {
         // Remove the NOTFOUND transactions from the peer
         LOCK(cs_main);
         CNodeState *state = State(pfrom->GetId());
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() <=
             MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             for (CInv &inv : vInv) {
                 if (inv.type == MSG_TX) {
                     const TxId txid(inv.hash);
                     // If we receive a NOTFOUND message for a txid we requested,
                     // erase it from our data structures for this peer.
                     auto in_flight_it =
                         state->m_tx_download.m_tx_in_flight.find(txid);
                     if (in_flight_it ==
                         state->m_tx_download.m_tx_in_flight.end()) {
                         // Skip any further work if this is a spurious NOTFOUND
                         // message.
                         continue;
                     }
                     state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
                     state->m_tx_download.m_tx_announced.erase(txid);
                 }
             }
         }
         return true;
     }
 
     // Ignore unknown commands for extensibility
     LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
              SanitizeString(strCommand), pfrom->GetId());
     return true;
 }
 
 bool PeerLogicValidation::SendRejectsAndCheckIfBanned(CNode *pnode,
                                                       bool enable_bip61)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     CNodeState &state = *State(pnode->GetId());
 
     if (enable_bip61) {
         for (const CBlockReject &reject : state.rejects) {
             connman->PushMessage(
                 pnode,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK),
                           reject.chRejectCode, reject.strRejectReason,
                           reject.hashBlock));
         }
     }
     state.rejects.clear();
 
     if (state.m_should_discourage) {
         state.m_should_discourage = false;
         if (pnode->HasPermission(PF_NOBAN)) {
             LogPrintf("Warning: not punishing whitelisted peer %s!\n",
                       pnode->addr.ToString());
         } else if (pnode->m_manual_connection) {
             LogPrintf("Warning: not punishing manually-connected peer %s!\n",
                       pnode->addr.ToString());
         } else if (pnode->addr.IsLocal()) {
             // Disconnect but don't discourage this local node
             LogPrintf(
                 "Warning: disconnecting but not discouraging local peer %s!\n",
                 pnode->addr.ToString());
             pnode->fDisconnect = true;
         } else {
             // Disconnect and discourage all nodes sharing the address
             LogPrintf("Disconnecting and discouraging peer %s!\n",
                       pnode->addr.ToString());
             if (m_banman) {
                 m_banman->Discourage(pnode->addr);
             }
             connman->DisconnectNode(pnode->addr);
         }
         return true;
     }
     return false;
 }
 
 bool PeerLogicValidation::ProcessMessages(const Config &config, CNode *pfrom,
                                           std::atomic<bool> &interruptMsgProc) {
     const CChainParams &chainparams = config.GetChainParams();
     //
     // Message format
     //  (4) message start
     //  (12) command
     //  (4) size
     //  (4) checksum
     //  (x) data
     //
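     // Illustrative example (values assumed, not taken from this change): a
     // "ping" message carrying an 8-byte nonce occupies 4 + 12 + 4 + 4 = 24
     // header bytes (CMessageHeader::HEADER_SIZE) plus 8 payload bytes on the
     // wire, and the checksum covers only the payload.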
     bool fMoreWork = false;
 
     if (!pfrom->vRecvGetData.empty()) {
         ProcessGetData(config, pfrom, connman, interruptMsgProc);
     }
 
     if (!pfrom->orphan_work_set.empty()) {
         LOCK2(cs_main, g_cs_orphans);
         ProcessOrphanTx(config, connman, pfrom->orphan_work_set);
     }
 
     if (pfrom->fDisconnect) {
         return false;
     }
 
     // This maintains the order of responses and prevents vRecvGetData from
     // growing unbounded.
     if (!pfrom->vRecvGetData.empty()) {
         return true;
     }
     if (!pfrom->orphan_work_set.empty()) {
         return true;
     }
 
     // Don't bother if send buffer is too full to respond anyway
     if (pfrom->fPauseSend) {
         return false;
     }
 
     std::list<CNetMessage> msgs;
     {
         LOCK(pfrom->cs_vProcessMsg);
         if (pfrom->vProcessMsg.empty()) {
             return false;
         }
         // Just take one message
         msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                     pfrom->vProcessMsg.begin());
         pfrom->nProcessQueueSize -=
             msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
         pfrom->fPauseRecv =
             pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
         fMoreWork = !pfrom->vProcessMsg.empty();
     }
     CNetMessage &msg(msgs.front());
 
     msg.SetVersion(pfrom->GetRecvVersion());
 
     // Scan for message start
     if (memcmp(std::begin(msg.hdr.pchMessageStart),
                std::begin(chainparams.NetMagic()),
                CMessageHeader::MESSAGE_START_SIZE) != 0) {
         LogPrint(BCLog::NET,
                  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                  SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
 
         // Make sure we discourage the peer this came from for some time.
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         connman->DisconnectNode(pfrom->addr);
 
         pfrom->fDisconnect = true;
         return false;
     }
 
     // Read header
     CMessageHeader &hdr = msg.hdr;
     if (!hdr.IsValid(config)) {
         LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                  SanitizeString(hdr.GetCommand()), pfrom->GetId());
         return fMoreWork;
     }
     std::string strCommand = hdr.GetCommand();
 
     // Message size
     unsigned int nMessageSize = hdr.nMessageSize;
 
     // Checksum
     CDataStream &vRecv = msg.vRecv;
     const uint256 &hash = msg.GetMessageHash();
     if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) !=
         0) {
         LogPrint(
             BCLog::NET,
             "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s from "
             "peer=%d\n",
             __func__, SanitizeString(strCommand), nMessageSize,
             HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE),
             HexStr(hdr.pchChecksum,
                    hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE),
             pfrom->GetId());
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         connman->DisconnectNode(pfrom->addr);
         return fMoreWork;
     }
 
     // Process message
     bool fRet = false;
     try {
         fRet =
             ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime, connman,
                            m_banman, interruptMsgProc, m_enable_bip61);
         if (interruptMsgProc) {
             return false;
         }
 
         if (!pfrom->vRecvGetData.empty()) {
             fMoreWork = true;
         }
     } catch (const std::ios_base::failure &e) {
         if (m_enable_bip61) {
             connman->PushMessage(
                 pfrom,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED,
                           std::string("error parsing message")));
         }
         if (strstr(e.what(), "end of data")) {
             // Allow exceptions from under-length message on vRecv
             LogPrint(BCLog::NET,
                      "%s(%s, %u bytes): Exception '%s' caught, normally caused "
                      "by a message being shorter than its stated length\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else if (strstr(e.what(), "size too large")) {
             // Allow exceptions from over-long size
             LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else if (strstr(e.what(), "non-canonical ReadCompactSize()")) {
             // Allow exceptions from non-canonical encoding
             LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else {
             PrintExceptionContinue(&e, "ProcessMessages()");
         }
     } catch (const std::exception &e) {
         PrintExceptionContinue(&e, "ProcessMessages()");
     } catch (...) {
         PrintExceptionContinue(nullptr, "ProcessMessages()");
     }
 
     if (!fRet) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes) FAILED peer=%d\n", __func__,
                  SanitizeString(strCommand), nMessageSize, pfrom->GetId());
     }
 
     LOCK(cs_main);
     SendRejectsAndCheckIfBanned(pfrom, m_enable_bip61);
 
     return fMoreWork;
 }
 
 void PeerLogicValidation::ConsiderEviction(CNode *pto,
                                            int64_t time_in_seconds) {
     AssertLockHeld(cs_main);
 
     CNodeState &state = *State(pto->GetId());
     const CNetMsgMaker msgMaker(pto->GetSendVersion());
 
     if (!state.m_chain_sync.m_protect &&
         IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
         // This is an outbound peer subject to disconnection if they don't
         // announce a block with as much work as the current tip within
         // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
         // chain has more work than ours, we should sync to it, unless it's
         // invalid, in which case we should find that out and disconnect from
         // them elsewhere).
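         // Illustrative timeline (assuming the usual 20-minute
         // CHAIN_SYNC_TIMEOUT): the peer's best known block falls behind our
         // tip at time T, so m_timeout is set to T + 1200s. If the peer is
         // still behind at T + 1200s we send a single getheaders and bump the
         // timeout by HEADERS_RESPONSE_TIME (120s, defined below); if it has
         // not caught up by T + 1320s it is disconnected.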
         if (state.pindexBestKnownBlock != nullptr &&
             state.pindexBestKnownBlock->nChainWork >=
                 ::ChainActive().Tip()->nChainWork) {
             if (state.m_chain_sync.m_timeout != 0) {
                 state.m_chain_sync.m_timeout = 0;
                 state.m_chain_sync.m_work_header = nullptr;
                 state.m_chain_sync.m_sent_getheaders = false;
             }
         } else if (state.m_chain_sync.m_timeout == 0 ||
                    (state.m_chain_sync.m_work_header != nullptr &&
                     state.pindexBestKnownBlock != nullptr &&
                     state.pindexBestKnownBlock->nChainWork >=
                         state.m_chain_sync.m_work_header->nChainWork)) {
             // Our best block known by this peer is behind our tip, and we're
             // either noticing that for the first time, OR this peer was able to
             // catch up to some earlier point where we checked against our tip.
             // Either way, set a new timeout based on current tip.
             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
             state.m_chain_sync.m_work_header = ::ChainActive().Tip();
             state.m_chain_sync.m_sent_getheaders = false;
         } else if (state.m_chain_sync.m_timeout > 0 &&
                    time_in_seconds > state.m_chain_sync.m_timeout) {
             // No evidence yet that our peer has synced to a chain with work
             // equal to that of our tip, when we first detected it was behind.
             // Send a single getheaders message to give the peer a chance to
             // update us.
             if (state.m_chain_sync.m_sent_getheaders) {
                 // They've run out of time to catch up!
                 LogPrintf(
                     "Disconnecting outbound peer %d for old chain, best known "
                     "block = %s\n",
                     pto->GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>");
                 pto->fDisconnect = true;
             } else {
                 assert(state.m_chain_sync.m_work_header);
                 LogPrint(
                     BCLog::NET,
                     "sending getheaders to outbound peer=%d to verify chain "
                     "work (current best known block:%s, benchmark blockhash: "
                     "%s)\n",
                     pto->GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>",
                     state.m_chain_sync.m_work_header->GetBlockHash()
                         .ToString());
                 connman->PushMessage(
                     pto,
                     msgMaker.Make(NetMsgType::GETHEADERS,
                                   ::ChainActive().GetLocator(
                                       state.m_chain_sync.m_work_header->pprev),
                                   uint256()));
                 state.m_chain_sync.m_sent_getheaders = true;
                 // 2 minutes
                 constexpr int64_t HEADERS_RESPONSE_TIME = 120;
                 // Bump the timeout to allow a response, which could clear the
                 // timeout (if the response shows the peer has synced), reset
                 // the timeout (if the peer syncs to the required work but not
                 // to our tip), or result in disconnect (if we advance to the
                 // timeout and pindexBestKnownBlock has not sufficiently
                 // progressed)
                 state.m_chain_sync.m_timeout =
                     time_in_seconds + HEADERS_RESPONSE_TIME;
             }
         }
     }
 }
 
 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) {
     // Check whether we have too many outbound peers
     int extra_peers = connman->GetExtraOutboundCount();
     if (extra_peers <= 0) {
         return;
     }
 
     // If we have more outbound peers than we target, disconnect one.
     // Pick the outbound peer that least recently announced a new block to us,
     // with ties broken by choosing the more recent connection (higher node id).
     NodeId worst_peer = -1;
     int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
 
     connman->ForEachNode([&](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // Ignore non-outbound peers, or nodes marked for disconnect already
         if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) {
             return;
         }
         CNodeState *state = State(pnode->GetId());
         if (state == nullptr) {
             // shouldn't be possible, but just in case
             return;
         }
         // Don't evict our protected peers
         if (state->m_chain_sync.m_protect) {
             return;
         }
         // Don't evict our block-relay-only peers.
         if (pnode->m_tx_relay == nullptr) {
             return;
         }
 
         if (state->m_last_block_announcement < oldest_block_announcement ||
             (state->m_last_block_announcement == oldest_block_announcement &&
              pnode->GetId() > worst_peer)) {
             worst_peer = pnode->GetId();
             oldest_block_announcement = state->m_last_block_announcement;
         }
     });
 
     if (worst_peer == -1) {
         return;
     }
 
     bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // Only disconnect a peer that has been connected to us for some
         // reasonable fraction of our check-frequency, to give it time for new
         // information to have arrived.
         // Also don't disconnect any peer we're trying to download a block from.
         CNodeState &state = *State(pnode->GetId());
         if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME &&
             state.nBlocksInFlight == 0) {
             LogPrint(BCLog::NET,
                      "disconnecting extra outbound peer=%d (last block "
                      "announcement received at time %d)\n",
                      pnode->GetId(), oldest_block_announcement);
             pnode->fDisconnect = true;
             return true;
         } else {
             LogPrint(BCLog::NET,
                      "keeping outbound peer=%d chosen for eviction "
                      "(connect time: %d, blocks_in_flight: %d)\n",
                      pnode->GetId(), pnode->nTimeConnected,
                      state.nBlocksInFlight);
             return false;
         }
     });
 
     if (disconnected) {
         // If we disconnected an extra peer, that means we successfully
         // connected to at least one peer after the last time we detected a
         // stale tip. Don't try any more extra peers until we next detect a
         // stale tip, to limit the load we put on the network from these extra
         // connections.
         connman->SetTryNewOutboundPeer(false);
     }
 }
 
 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(
     const Consensus::Params &consensusParams) {
     LOCK(cs_main);
 
     if (connman == nullptr) {
         return;
     }
 
     int64_t time_in_seconds = GetTime();
 
     EvictExtraOutboundPeers(time_in_seconds);
 
     if (time_in_seconds <= m_stale_tip_check_time) {
         return;
     }
 
     // Check whether our tip is stale, and if so, allow using an extra outbound
     // peer.
     if (!fImporting && !fReindex && connman->GetNetworkActive() &&
         connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
         LogPrintf("Potential stale tip detected, will try using extra outbound "
                   "peer (last tip update: %d seconds ago)\n",
                   time_in_seconds - g_last_tip_update);
         connman->SetTryNewOutboundPeer(true);
     } else if (connman->GetTryNewOutboundPeer()) {
         connman->SetTryNewOutboundPeer(false);
     }
     m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
 }
 
 namespace {
 class CompareInvMempoolOrder {
     CTxMemPool *mp;
 
 public:
     explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; }
 
     bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
         /**
          * As std::make_heap produces a max-heap, we want the entries with the
          * fewest ancestors/highest fee to sort later.
          */
         return mp->CompareDepthAndScore(*b, *a);
     }
 };
 } // namespace
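 // Usage sketch (assumed, mirroring SendMessages() below): building a max-heap
 // of setInventoryTxToSend iterators with this comparator leaves the
 // transaction that CompareDepthAndScore ranks best (fewest ancestors, highest
 // feerate) at the front, so std::pop_heap hands out announcement candidates
 // in the preferred order.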
 
 bool PeerLogicValidation::SendMessages(const Config &config, CNode *pto,
                                        std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     // Don't send anything until the version handshake is complete
     if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
         return true;
     }
 
     // If we get here, the outgoing message serialization version is set and
     // can't change.
     const CNetMsgMaker msgMaker(pto->GetSendVersion());
 
     //
     // Message: ping
     //
     bool pingSend = false;
     if (pto->fPingQueued) {
         // RPC ping request by user
         pingSend = true;
     }
     if (pto->nPingNonceSent == 0 &&
         pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
         // Ping automatically sent as a latency probe & keepalive.
         pingSend = true;
     }
     if (pingSend) {
         uint64_t nonce = 0;
         while (nonce == 0) {
             GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
         }
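         // A nonce of 0 is reserved to mean "no ping in flight" (see the PONG
         // handler and the non-BIP31 branch below), which is why the loop
         // above re-draws until the nonce is non-zero.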
         pto->fPingQueued = false;
         pto->nPingUsecStart = GetTimeMicros();
         if (pto->nVersion > BIP0031_VERSION) {
             pto->nPingNonceSent = nonce;
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
         } else {
             // Peer is too old to support ping command with nonce, pong will
             // never arrive.
             pto->nPingNonceSent = 0;
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
         }
     }
 
     // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
     TRY_LOCK(cs_main, lockMain);
     if (!lockMain) {
         return true;
     }
 
     if (SendRejectsAndCheckIfBanned(pto, m_enable_bip61)) {
         return true;
     }
     CNodeState &state = *State(pto->GetId());
 
     // Address refresh broadcast
     int64_t nNow = GetTimeMicros();
     if (pto->IsAddrRelayPeer() &&
         !::ChainstateActive().IsInitialBlockDownload() &&
         pto->nNextLocalAddrSend < nNow) {
         AdvertiseLocal(pto);
         pto->nNextLocalAddrSend =
             PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     //
     // Message: addr
     //
     if (pto->IsAddrRelayPeer() && pto->nNextAddrSend < nNow) {
         pto->nNextAddrSend =
             PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
         std::vector<CAddress> vAddr;
         vAddr.reserve(pto->vAddrToSend.size());
         for (const CAddress &addr : pto->vAddrToSend) {
             if (!pto->addrKnown.contains(addr.GetKey())) {
                 pto->addrKnown.insert(addr.GetKey());
                 vAddr.push_back(addr);
                 // receiver rejects addr messages larger than 1000 entries
                 if (vAddr.size() >= 1000) {
                     connman->PushMessage(
                         pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
                     vAddr.clear();
                 }
             }
         }
         pto->vAddrToSend.clear();
         if (!vAddr.empty()) {
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
         }
 
         // we only send the big addr message once
         if (pto->vAddrToSend.capacity() > 40) {
             pto->vAddrToSend.shrink_to_fit();
         }
     }
 
     // Start block sync
     if (pindexBestHeader == nullptr) {
         pindexBestHeader = ::ChainActive().Tip();
     }
 
     // Download if this is a nice peer, or we have no nice peers and this one
     // might do.
     bool fFetch = state.fPreferredDownload ||
                   (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot);
 
     if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
         // Only actively request headers from a single peer, unless we're close
         // to today.
         if ((nSyncStarted == 0 && fFetch) ||
             pindexBestHeader->GetBlockTime() >
                 GetAdjustedTime() - 24 * 60 * 60) {
             state.fSyncStarted = true;
             state.nHeadersSyncTimeout =
                 GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                 HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER *
                     (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) /
                     (consensusParams.nPowTargetSpacing);
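             // Illustrative example (assuming the usual 15-minute
             // HEADERS_DOWNLOAD_TIMEOUT_BASE and 1ms-per-header
             // HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER): a best header roughly one
             // year behind (~52,560 blocks at 10-minute spacing) yields a
             // timeout of about 15 minutes plus 53 seconds.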
             nSyncStarted++;
             const CBlockIndex *pindexStart = pindexBestHeader;
             /**
              * If possible, start at the block preceding the currently best
              * known header. This ensures that we always get a non-empty list of
              * headers back as long as the peer is up-to-date. With a non-empty
              * response, we can initialise the peer's known best block. This
              * wouldn't be possible if we requested starting at pindexBestHeader
              * and got back an empty response.
              */
             if (pindexStart->pprev) {
                 pindexStart = pindexStart->pprev;
             }
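             // For example (heights assumed): if pindexBestHeader is at height
             // 100000, the locator below starts from height 99999, so an
             // up-to-date peer replies with at least the header at 100000 and
             // we can initialise its best known block.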
 
             LogPrint(BCLog::NET,
                      "initial getheaders (%d) to peer=%d (startheight:%d)\n",
                      pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
             connman->PushMessage(
                 pto, msgMaker.Make(NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexStart),
                                    uint256()));
         }
     }
 
     //
     // Try sending block announcements via headers
     //
     {
         // If we have fewer than MAX_BLOCKS_TO_ANNOUNCE block hashes queued
         // for relay and our peer wants headers announcements, then find the
         // first header not yet known to our peer that would connect, and send
         // the headers from there. If no header would connect, or if we have
         // too many blocks, or if the peer doesn't want headers, just add them
         // all to the inv queue.
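         // Illustrative example (heights assumed): if the queue holds blocks
         // at heights 101..103 and the peer already has 101, we send headers
         // for 102 and 103; if the peer has neither 100 nor 101, nothing
         // connects and we fall back to announcing the tip via inv.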
         LOCK(pto->cs_inventory);
         std::vector<CBlock> vHeaders;
         bool fRevertToInv =
             ((!state.fPreferHeaders &&
               (!state.fPreferHeaderAndIDs ||
                pto->vBlockHashesToAnnounce.size() > 1)) ||
              pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
         // last header queued for delivery
         const CBlockIndex *pBestIndex = nullptr;
         // ensure pindexBestKnownBlock is up-to-date
         ProcessBlockAvailability(pto->GetId());
 
         if (!fRevertToInv) {
             bool fFoundStartingHeader = false;
             // Try to find the first header that our peer doesn't have, and
             // then send all headers past that one. If we come across any
             // header that isn't on ::ChainActive(), give up.
             for (const BlockHash &hash : pto->vBlockHashesToAnnounce) {
                 const CBlockIndex *pindex = LookupBlockIndex(hash);
                 assert(pindex);
                 if (::ChainActive()[pindex->nHeight] != pindex) {
                     // Bail out if we reorged away from this block
                     fRevertToInv = true;
                     break;
                 }
                 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                     // This means that the list of blocks to announce doesn't
                     // connect to each other. This shouldn't really be possible
                     // to hit during regular operation (because reorgs should
                     // take us to a chain that has some block not on the prior
                     // chain, which should be caught by the prior check), but
                     // one way this could happen is by using invalidateblock /
                     // reconsiderblock repeatedly on the tip, causing it to be
                     // added multiple times to vBlockHashesToAnnounce. Robustly
                     // deal with this rare situation by reverting to an inv.
                     fRevertToInv = true;
                     break;
                 }
                 pBestIndex = pindex;
                 if (fFoundStartingHeader) {
                     // add this to the headers message
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else if (PeerHasHeader(&state, pindex)) {
                     // Keep looking for the first new block.
                     continue;
                 } else if (pindex->pprev == nullptr ||
                            PeerHasHeader(&state, pindex->pprev)) {
                     // Peer doesn't have this header but they do have the prior
                     // one.
                     // Start sending headers.
                     fFoundStartingHeader = true;
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else {
                     // Peer doesn't have this header or the prior one --
                     // nothing will connect, so bail out.
                     fRevertToInv = true;
                     break;
                 }
             }
         }
         if (!fRevertToInv && !vHeaders.empty()) {
             if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                 // We only send up to 1 block as header-and-ids; needing more
                 // probably means we're doing an initial-ish sync or the peer
                 // is slow.
                 LogPrint(BCLog::NET,
                          "%s sending header-and-ids %s to peer=%d\n", __func__,
                          vHeaders.front().GetHash().ToString(), pto->GetId());
 
                 int nSendFlags = 0;
 
                 bool fGotBlockFromCache = false;
                 {
                     LOCK(cs_most_recent_block);
                     if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
                         CBlockHeaderAndShortTxIDs cmpctblock(
                             *most_recent_block);
                         connman->PushMessage(
                             pto,
                             msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                           cmpctblock));
                         fGotBlockFromCache = true;
                     }
                 }
                 if (!fGotBlockFromCache) {
                     CBlock block;
                     bool ret =
                         ReadBlockFromDisk(block, pBestIndex, consensusParams);
                     assert(ret);
                     CBlockHeaderAndShortTxIDs cmpctblock(block);
                     connman->PushMessage(
                         pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                            cmpctblock));
                 }
                 state.pindexBestHeaderSent = pBestIndex;
             } else if (state.fPreferHeaders) {
                 if (vHeaders.size() > 1) {
                     LogPrint(BCLog::NET,
                              "%s: %u headers, range (%s, %s), to peer=%d\n",
                              __func__, vHeaders.size(),
                              vHeaders.front().GetHash().ToString(),
                              vHeaders.back().GetHash().ToString(),
                              pto->GetId());
                 } else {
                     LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n",
                              __func__, vHeaders.front().GetHash().ToString(),
                              pto->GetId());
                 }
                 connman->PushMessage(
                     pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                 state.pindexBestHeaderSent = pBestIndex;
             } else {
                 fRevertToInv = true;
             }
         }
         if (fRevertToInv) {
             // If falling back to using an inv, just try to inv the tip. The
             // last entry in vBlockHashesToAnnounce was our tip at some point in
             // the past.
             if (!pto->vBlockHashesToAnnounce.empty()) {
                 const BlockHash &hashToAnnounce =
                     pto->vBlockHashesToAnnounce.back();
                 const CBlockIndex *pindex = LookupBlockIndex(hashToAnnounce);
                 assert(pindex);
 
                 // Warn if we're announcing a block that is not on the main
                 // chain. This should be very rare and could be optimized out.
                 // Just log for now.
                 if (::ChainActive()[pindex->nHeight] != pindex) {
                     LogPrint(BCLog::NET,
                              "Announcing block %s not on main chain (tip=%s)\n",
                              hashToAnnounce.ToString(),
                              ::ChainActive().Tip()->GetBlockHash().ToString());
                 }
 
                 // If the peer's chain has this block, don't inv it back.
                 if (!PeerHasHeader(&state, pindex)) {
                     pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                     LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n",
                              __func__, pto->GetId(), hashToAnnounce.ToString());
                 }
             }
         }
         pto->vBlockHashesToAnnounce.clear();
     }
 
     //
     // Message: inventory
     //
     std::vector<CInv> vInv;
     {
         LOCK(pto->cs_inventory);
         vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
                                       INVENTORY_BROADCAST_MAX_PER_MB *
                                           config.GetMaxBlockSize() / 1000000));
 
         // Add blocks
         for (const BlockHash &hash : pto->vInventoryBlockToSend) {
             vInv.push_back(CInv(MSG_BLOCK, hash));
             if (vInv.size() == MAX_INV_SZ) {
                 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                 vInv.clear();
             }
         }
         pto->vInventoryBlockToSend.clear();
 
         if (pto->m_tx_relay != nullptr) {
             LOCK(pto->m_tx_relay->cs_tx_inventory);
             // Check whether periodic sends should happen
             bool fSendTrickle = pto->HasPermission(PF_NOBAN);
             if (pto->m_tx_relay->nNextInvSend < nNow) {
                 fSendTrickle = true;
                 if (pto->fInbound) {
                     pto->m_tx_relay->nNextInvSend =
                         connman->PoissonNextSendInbound(
                             nNow, INVENTORY_BROADCAST_INTERVAL);
                 } else {
                     // Use half the delay for outbound peers, as there is less
                     // privacy concern for them.
                     pto->m_tx_relay->nNextInvSend = PoissonNextSend(
                         nNow, INVENTORY_BROADCAST_INTERVAL >> 1);
                 }
             }
 
             // If it's time to send but the peer has asked us not to relay
             // transactions, drop any queued transaction inventory.
             if (fSendTrickle) {
                 LOCK(pto->m_tx_relay->cs_filter);
                 if (!pto->m_tx_relay->fRelayTxes) {
                     pto->m_tx_relay->setInventoryTxToSend.clear();
                 }
             }
 
             // Respond to BIP35 mempool requests
             if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
                 auto vtxinfo = g_mempool.infoAll();
                 pto->m_tx_relay->fSendMempool = false;
                 Amount filterrate = Amount::zero();
                 {
                     LOCK(pto->m_tx_relay->cs_feeFilter);
                     filterrate = pto->m_tx_relay->minFeeFilter;
                 }
 
                 LOCK(pto->m_tx_relay->cs_filter);
 
                 for (const auto &txinfo : vtxinfo) {
                     const TxId &txid = txinfo.tx->GetId();
                     CInv inv(MSG_TX, txid);
                     pto->m_tx_relay->setInventoryTxToSend.erase(txid);
                     if (filterrate != Amount::zero() &&
                         txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                     if (pto->m_tx_relay->pfilter &&
                         !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     pto->m_tx_relay->filterInventoryKnown.insert(txid);
                     vInv.push_back(inv);
                     if (vInv.size() == MAX_INV_SZ) {
                         connman->PushMessage(
                             pto, msgMaker.Make(NetMsgType::INV, vInv));
                         vInv.clear();
                     }
                 }
                 pto->m_tx_relay->m_last_mempool_req =
                     GetTime<std::chrono::seconds>();
             }
 
             // Determine transactions to relay
             if (fSendTrickle) {
                 // Produce a vector with all candidates for sending
                 std::vector<std::set<TxId>::iterator> vInvTx;
                 vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
                 for (std::set<TxId>::iterator it =
                          pto->m_tx_relay->setInventoryTxToSend.begin();
                      it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
                     vInvTx.push_back(it);
                 }
                 Amount filterrate = Amount::zero();
                 {
                     LOCK(pto->m_tx_relay->cs_feeFilter);
                     filterrate = pto->m_tx_relay->minFeeFilter;
                 }
                 // Topologically and fee-rate sort the inventory we send for
                 // privacy and priority reasons. A heap is used so that not all
                 // items need sorting if only a few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&g_mempool);
                 std::make_heap(vInvTx.begin(), vInvTx.end(),
                                compareInvMempoolOrder);
                 // No reason to drain out at many times the network's capacity,
                 // especially since we have many peers and some will draw much
                 // shorter delays.
                 unsigned int nRelayedTransactions = 0;
                 LOCK(pto->m_tx_relay->cs_filter);
                 while (!vInvTx.empty() &&
                        nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
                                                   config.GetMaxBlockSize() /
                                                   1000000) {
                     // Fetch the top element from the heap
                     std::pop_heap(vInvTx.begin(), vInvTx.end(),
                                   compareInvMempoolOrder);
                     std::set<TxId>::iterator it = vInvTx.back();
                     vInvTx.pop_back();
                     const TxId txid = *it;
                     // Remove it from the to-be-sent set
                     pto->m_tx_relay->setInventoryTxToSend.erase(it);
                     // Check if not in the filter already
                     if (pto->m_tx_relay->filterInventoryKnown.contains(txid)) {
                         continue;
                     }
                     // Not in the mempool anymore? Don't bother sending it.
                     auto txinfo = g_mempool.info(txid);
                     if (!txinfo.tx) {
                         continue;
                     }
                     if (filterrate != Amount::zero() &&
                         txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                     if (pto->m_tx_relay->pfilter &&
                         !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     // Send
                     vInv.push_back(CInv(MSG_TX, txid));
                     nRelayedTransactions++;
                     {
                         // Expire old relay messages
                         while (!vRelayExpiration.empty() &&
                                vRelayExpiration.front().first < nNow) {
                             mapRelay.erase(vRelayExpiration.front().second);
                             vRelayExpiration.pop_front();
                         }
 
                         auto ret = mapRelay.insert(
                             std::make_pair(txid, std::move(txinfo.tx)));
                         if (ret.second) {
                             vRelayExpiration.push_back(
                                 std::make_pair(nNow +
                                                    std::chrono::microseconds{
                                                        RELAY_TX_CACHE_TIME}
                                                        .count(),
                                                ret.first));
                         }
                     }
                     if (vInv.size() == MAX_INV_SZ) {
                         connman->PushMessage(
                             pto, msgMaker.Make(NetMsgType::INV, vInv));
                         vInv.clear();
                     }
                     pto->m_tx_relay->filterInventoryKnown.insert(txid);
                 }
             }
         }
     }
     if (!vInv.empty()) {
         connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
     }
 
     // Detect whether we're stalling
     const auto current_time = GetTime<std::chrono::microseconds>();
     // nNow is the current system time (GetTimeMicros is not mockable) and
     // should be replaced by the mockable current_time eventually
     nNow = GetTimeMicros();
     if (state.nStallingSince &&
         state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
         // Stalling only triggers when the block download window cannot move.
         // During normal steady state, the download window should be much larger
         // than the to-be-downloaded set of blocks, so disconnection should only
         // happen during initial block download.
         LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                   pto->GetId());
         pto->fDisconnect = true;
         return true;
     }
     // In case there is a block that has been in flight from this peer for 2 +
     // 0.5 * N times the block interval (with N the number of peers from which
     // we're downloading validated blocks), disconnect due to timeout. We
     // compensate for other peers to prevent killing off peers due to our own
     // downstream link being saturated. We only count validated in-flight blocks
     // so peers can't advertise non-existing block hashes to unreasonably
     // increase our timeout.
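     // Illustrative example (assuming the usual 10-minute target spacing):
     // with two other peers delivering validated blocks (N = 2), the allowance
     // is (2 + 0.5 * 2) * 600s = 1800 seconds before disconnection.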
     if (state.vBlocksInFlight.size() > 0) {
         QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
         int nOtherPeersWithValidatedDownloads =
             nPeersWithValidatedDownloads -
             (state.nBlocksInFlightValidHeaders > 0);
         if (nNow > state.nDownloadingSince +
                        consensusParams.nPowTargetSpacing *
                            (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                             BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                                 nOtherPeersWithValidatedDownloads)) {
             LogPrintf("Timeout downloading block %s from peer=%d, "
                       "disconnecting\n",
                       queuedBlock.hash.ToString(), pto->GetId());
             pto->fDisconnect = true;
             return true;
         }
     }
 
     // Check for headers sync timeouts
     if (state.fSyncStarted &&
         state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
         // Detect whether this is a stalling initial-headers-sync peer
         if (pindexBestHeader->GetBlockTime() <=
             GetAdjustedTime() - 24 * 60 * 60) {
             if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 &&
                 (nPreferredDownload - state.fPreferredDownload >= 1)) {
                 // Disconnect a (non-whitelisted) peer if it is our only sync
                 // peer, and we have others we could be using instead.
                 // Note: If all our peers are inbound, then we won't disconnect
                 // our sync peer for stalling; we have bigger problems if we
                 // can't get any outbound peers.
                 if (!pto->HasPermission(PF_NOBAN)) {
                     LogPrintf("Timeout downloading headers from peer=%d, "
                               "disconnecting\n",
                               pto->GetId());
                     pto->fDisconnect = true;
                     return true;
                 } else {
                     LogPrintf("Timeout downloading headers from whitelisted "
                               "peer=%d, not disconnecting\n",
                               pto->GetId());
                     // Reset the headers sync state so that we have a chance to
                     // try downloading from a different peer.
                     // Note: this will also result in at least one more
                     // getheaders message to be sent to this peer (eventually).
                     state.fSyncStarted = false;
                     nSyncStarted--;
                     state.nHeadersSyncTimeout = 0;
                 }
             }
         } else {
             // After we've caught up once, reset the timeout so we can't trigger
             // disconnect later.
             state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
         }
     }
 
     // Check that outbound peers have reasonable chains. GetTime() is used by
     // this anti-DoS logic so we can test it using mocktime.
     ConsiderEviction(pto, GetTime());
 
     //
     // Message: getdata (blocks)
     //
     std::vector<CInv> vGetData;
     if (!pto->fClient &&
         ((fFetch && !pto->m_limited_node) ||
          !::ChainstateActive().IsInitialBlockDownload()) &&
         state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
         std::vector<const CBlockIndex *> vToDownload;
         NodeId staller = -1;
         FindNextBlocksToDownload(pto->GetId(),
                                  MAX_BLOCKS_IN_TRANSIT_PER_PEER -
                                      state.nBlocksInFlight,
                                  vToDownload, staller, consensusParams);
         for (const CBlockIndex *pindex : vToDownload) {
             vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
             MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(),
                                 consensusParams, pindex);
             LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                      pindex->GetBlockHash().ToString(), pindex->nHeight,
                      pto->GetId());
         }
         if (state.nBlocksInFlight == 0 && staller != -1) {
             if (State(staller)->nStallingSince == 0) {
                 State(staller)->nStallingSince = nNow;
                 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
             }
         }
     }
 
     //
     // Message: getdata (transactions)
     //
 
     // For robustness, expire old requests after a long timeout, so that we can
     // resume downloading transactions from a peer even if they were
     // unresponsive in the past. Eventually we should consider disconnecting
     // peers, but this is conservative.
     if (state.m_tx_download.m_check_expiry_timer <= current_time) {
         for (auto it = state.m_tx_download.m_tx_in_flight.begin();
              it != state.m_tx_download.m_tx_in_flight.end();) {
             if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
                 LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
                          it->first.ToString(), pto->GetId());
                 state.m_tx_download.m_tx_announced.erase(it->first);
                 state.m_tx_download.m_tx_in_flight.erase(it++);
             } else {
                 ++it;
             }
         }
         // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
         // so that we're not doing this for all peers at the same time.
         state.m_tx_download.m_check_expiry_timer =
             current_time + TX_EXPIRY_INTERVAL / 2 +
             GetRandMicros(TX_EXPIRY_INTERVAL);
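         // e.g. the next sweep lands uniformly between 0.5x and 1.5x
         // TX_EXPIRY_INTERVAL from now, so sweeps for different peers spread
         // out rather than firing together.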
     }
 
     auto &tx_process_time = state.m_tx_download.m_tx_process_time;
     while (!tx_process_time.empty() &&
            tx_process_time.begin()->first <= current_time &&
            state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
         const TxId txid = tx_process_time.begin()->second;
         // Erase this entry from tx_process_time (it may be added back for
         // processing at a later time, see below)
         tx_process_time.erase(tx_process_time.begin());
         CInv inv(MSG_TX, txid);
         if (!AlreadyHave(inv)) {
             // If this transaction was last requested more than 1 minute ago,
             // then request.
             const auto last_request_time = GetTxRequestTime(txid);
             if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
                 LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(),
                          pto->GetId());
                 vGetData.push_back(inv);
                 if (vGetData.size() >= MAX_GETDATA_SZ) {
                     connman->PushMessage(
                         pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                     vGetData.clear();
                 }
                 UpdateTxRequestTime(txid, current_time);
                 state.m_tx_download.m_tx_in_flight.emplace(txid, current_time);
             } else {
                 // This transaction is in flight from someone else; queue
                 // up processing to happen after the download times out
                 // (with a slight delay for inbound peers, to prefer
                 // requests to outbound peers).
                 const auto next_process_time = CalculateTxGetDataTime(
                     txid, current_time, !state.fPreferredDownload);
                 tx_process_time.emplace(next_process_time, txid);
             }
         } else {
             // We have already seen this transaction, no need to download.
             state.m_tx_download.m_tx_announced.erase(txid);
             state.m_tx_download.m_tx_in_flight.erase(txid);
         }
     }
 
     if (!vGetData.empty()) {
         connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
     }
 
     //
     // Message: feefilter
     //
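     // Tell the peer the minimum fee rate we currently accept so it can
     // skip relaying cheaper transactions to us; broadcast times are
     // randomized via PoissonNextSend.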
     // We don't want whitelisted peers to filter txs to us if we have
     // -whitelistforcerelay
     if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION &&
         gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
         !pto->HasPermission(PF_FORCERELAY)) {
         Amount currentFilter =
             g_mempool
                 .GetMinFee(
                     gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                     1000000)
                 .GetFeePerK();
         int64_t timeNow = GetTimeMicros();
         if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
             static CFeeRate default_feerate =
                 CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB);
             static FeeFilterRounder filterRounder(default_feerate);
             Amount filterToSend = filterRounder.round(currentFilter);
             filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
 
             if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
                 connman->PushMessage(
                     pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                 pto->m_tx_relay->lastSentFeeFilter = filterToSend;
             }
             pto->m_tx_relay->nextSendTimeFeeFilter =
                 PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
         }
         // If the fee filter has changed substantially and it's still more than
         // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
         // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
         else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
                      pto->m_tx_relay->nextSendTimeFeeFilter &&
                  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 ||
                   currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
             pto->m_tx_relay->nextSendTimeFeeFilter =
                 timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
         }
     }
     return true;
 }
 
 class CNetProcessingCleanup {
 public:
     CNetProcessingCleanup() {}
     ~CNetProcessingCleanup() {
         // orphan transactions
         mapOrphanTransactions.clear();
         mapOrphanTransactionsByPrev.clear();
     }
 };
 static CNetProcessingCleanup instance_of_cnetprocessingcleanup;
diff --git a/src/rest.cpp b/src/rest.cpp
index 76d22d43e..0c424a1f2 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -1,741 +1,742 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
+#include <blockdb.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <core_io.h>
 #include <httpserver.h>
 #include <index/txindex.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <rpc/blockchain.h>
 #include <rpc/protocol.h>
 #include <rpc/server.h>
 #include <streams.h>
 #include <sync.h>
 #include <txmempool.h>
 #include <util/strencodings.h>
 #include <validation.h>
 #include <version.h>
 
 #include <boost/algorithm/string.hpp>
 
 #include <univalue.h>
 
 // Allow a max of 15 outpoints to be queried at once.
 static const size_t MAX_GETUTXOS_OUTPOINTS = 15;
 
 enum class RetFormat {
     UNDEF,
     BINARY,
     HEX,
     JSON,
 };
 
 static const struct {
     enum RetFormat rf;
     const char *name;
 } rf_names[] = {
     {RetFormat::UNDEF, ""},
     {RetFormat::BINARY, "bin"},
     {RetFormat::HEX, "hex"},
     {RetFormat::JSON, "json"},
 };
 
 struct CCoin {
     uint32_t nHeight;
     CTxOut out;
 
     CCoin() : nHeight(0) {}
     explicit CCoin(Coin in)
         : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {}
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         uint32_t nTxVerDummy = 0;
         READWRITE(nTxVerDummy);
         READWRITE(nHeight);
         READWRITE(out);
     }
 };
 
 static bool RESTERR(HTTPRequest *req, enum HTTPStatusCode status,
                     std::string message) {
     req->WriteHeader("Content-Type", "text/plain");
     req->WriteReply(status, message + "\r\n");
     return false;
 }
 
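 /**
  * Split "<param>.<suffix>" into the bare parameter and the requested return
  * format. If the suffix is missing or not recognized, the whole string is
  * returned in 'param' and the format defaults to RetFormat::UNDEF.
  */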
 static enum RetFormat ParseDataFormat(std::string &param,
                                       const std::string &strReq) {
     const std::string::size_type pos = strReq.rfind('.');
     if (pos == std::string::npos) {
         param = strReq;
         return rf_names[0].rf;
     }
 
     param = strReq.substr(0, pos);
     const std::string suff(strReq, pos + 1);
 
     for (size_t i = 0; i < ARRAYLEN(rf_names); i++) {
         if (suff == rf_names[i].name) {
             return rf_names[i].rf;
         }
     }
 
     /* If no matching suffix is found, return the original string. */
     param = strReq;
     return rf_names[0].rf;
 }
 
 static std::string AvailableDataFormatsString() {
     std::string formats;
     for (size_t i = 0; i < ARRAYLEN(rf_names); i++) {
         if (strlen(rf_names[i].name) > 0) {
             formats.append(".");
             formats.append(rf_names[i].name);
             formats.append(", ");
         }
     }
 
     if (formats.length() > 0) {
         return formats.substr(0, formats.length() - 2);
     }
 
     return formats;
 }
 
 static bool CheckWarmup(HTTPRequest *req) {
     std::string statusmessage;
     if (RPCIsInWarmup(&statusmessage)) {
         return RESTERR(req, HTTP_SERVICE_UNAVAILABLE,
                        "Service temporarily unavailable: " + statusmessage);
     }
 
     return true;
 }
 
 static bool rest_headers(Config &config, HTTPRequest *req,
                          const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string param;
     const RetFormat rf = ParseDataFormat(param, strURIPart);
     std::vector<std::string> path;
     boost::split(path, param, boost::is_any_of("/"));
 
     if (path.size() != 2) {
         return RESTERR(req, HTTP_BAD_REQUEST,
                        "No header count specified. Use "
                        "/rest/headers/<count>/<hash>.<ext>.");
     }
 
     long count = strtol(path[0].c_str(), nullptr, 10);
     if (count < 1 || count > 2000) {
         return RESTERR(req, HTTP_BAD_REQUEST,
                        "Header count out of range: " + path[0]);
     }
 
     std::string hashStr = path[1];
     uint256 rawHash;
     if (!ParseHashStr(hashStr, rawHash)) {
         return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
     }
 
     const BlockHash hash(rawHash);
 
     const CBlockIndex *tip = nullptr;
     std::vector<const CBlockIndex *> headers;
     headers.reserve(count);
     {
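         // Walk forward along the active chain starting at the requested
         // hash, collecting at most 'count' headers.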
         LOCK(cs_main);
         tip = ::ChainActive().Tip();
         const CBlockIndex *pindex = LookupBlockIndex(hash);
         while (pindex != nullptr && ::ChainActive().Contains(pindex)) {
             headers.push_back(pindex);
             if (headers.size() == size_t(count)) {
                 break;
             }
             pindex = ::ChainActive().Next(pindex);
         }
     }
 
     switch (rf) {
         case RetFormat::BINARY: {
             CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
             for (const CBlockIndex *pindex : headers) {
                 ssHeader << pindex->GetBlockHeader();
             }
 
             std::string binaryHeader = ssHeader.str();
             req->WriteHeader("Content-Type", "application/octet-stream");
             req->WriteReply(HTTP_OK, binaryHeader);
             return true;
         }
 
         case RetFormat::HEX: {
             CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
             for (const CBlockIndex *pindex : headers) {
                 ssHeader << pindex->GetBlockHeader();
             }
 
             std::string strHex =
                 HexStr(ssHeader.begin(), ssHeader.end()) + "\n";
             req->WriteHeader("Content-Type", "text/plain");
             req->WriteReply(HTTP_OK, strHex);
             return true;
         }
         case RetFormat::JSON: {
             UniValue jsonHeaders(UniValue::VARR);
             for (const CBlockIndex *pindex : headers) {
                 jsonHeaders.push_back(blockheaderToJSON(tip, pindex));
             }
             std::string strJSON = jsonHeaders.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 }
 
 static bool rest_block(const Config &config, HTTPRequest *req,
                        const std::string &strURIPart, bool showTxDetails) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string hashStr;
     const RetFormat rf = ParseDataFormat(hashStr, strURIPart);
 
     uint256 rawHash;
     if (!ParseHashStr(hashStr, rawHash)) {
         return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
     }
 
     const BlockHash hash(rawHash);
 
     CBlock block;
     CBlockIndex *pblockindex = nullptr;
     CBlockIndex *tip = nullptr;
     {
         LOCK(cs_main);
         tip = ::ChainActive().Tip();
         pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
         }
 
         if (IsBlockPruned(pblockindex)) {
             return RESTERR(req, HTTP_NOT_FOUND,
                            hashStr + " not available (pruned data)");
         }
 
         if (!ReadBlockFromDisk(block, pblockindex,
                                config.GetChainParams().GetConsensus())) {
             return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
         }
     }
 
     switch (rf) {
         case RetFormat::BINARY: {
             CDataStream ssBlock(SER_NETWORK,
                                 PROTOCOL_VERSION | RPCSerializationFlags());
             ssBlock << block;
             std::string binaryBlock = ssBlock.str();
             req->WriteHeader("Content-Type", "application/octet-stream");
             req->WriteReply(HTTP_OK, binaryBlock);
             return true;
         }
 
         case RetFormat::HEX: {
             CDataStream ssBlock(SER_NETWORK,
                                 PROTOCOL_VERSION | RPCSerializationFlags());
             ssBlock << block;
             std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n";
             req->WriteHeader("Content-Type", "text/plain");
             req->WriteReply(HTTP_OK, strHex);
             return true;
         }
 
         case RetFormat::JSON: {
             UniValue objBlock =
                 blockToJSON(block, tip, pblockindex, showTxDetails);
             std::string strJSON = objBlock.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
 
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 }
 
 static bool rest_block_extended(Config &config, HTTPRequest *req,
                                 const std::string &strURIPart) {
     return rest_block(config, req, strURIPart, true);
 }
 
 static bool rest_block_notxdetails(Config &config, HTTPRequest *req,
                                    const std::string &strURIPart) {
     return rest_block(config, req, strURIPart, false);
 }
 
 static bool rest_chaininfo(Config &config, HTTPRequest *req,
                            const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string param;
     const RetFormat rf = ParseDataFormat(param, strURIPart);
 
     switch (rf) {
         case RetFormat::JSON: {
             JSONRPCRequest jsonRequest;
             jsonRequest.params = UniValue(UniValue::VARR);
             UniValue chainInfoObject = getblockchaininfo(config, jsonRequest);
             std::string strJSON = chainInfoObject.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: json)");
         }
     }
 }
 
 static bool rest_mempool_info(Config &config, HTTPRequest *req,
                               const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string param;
     const RetFormat rf = ParseDataFormat(param, strURIPart);
 
     switch (rf) {
         case RetFormat::JSON: {
             UniValue mempoolInfoObject = MempoolInfoToJSON(::g_mempool);
 
             std::string strJSON = mempoolInfoObject.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: json)");
         }
     }
 }
 
 static bool rest_mempool_contents(Config &config, HTTPRequest *req,
                                   const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string param;
     const RetFormat rf = ParseDataFormat(param, strURIPart);
 
     switch (rf) {
         case RetFormat::JSON: {
             UniValue mempoolObject = MempoolToJSON(::g_mempool, true);
 
             std::string strJSON = mempoolObject.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: json)");
         }
     }
 }
 
 static bool rest_tx(Config &config, HTTPRequest *req,
                     const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string hashStr;
     const RetFormat rf = ParseDataFormat(hashStr, strURIPart);
 
     uint256 hash;
     if (!ParseHashStr(hashStr, hash)) {
         return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
     }
 
     const TxId txid(hash);
 
     if (g_txindex) {
         g_txindex->BlockUntilSyncedToCurrentChain();
     }
 
     CTransactionRef tx;
     BlockHash hashBlock;
     if (!GetTransaction(txid, tx, config.GetChainParams().GetConsensus(),
                         hashBlock)) {
         return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found");
     }
 
     switch (rf) {
         case RetFormat::BINARY: {
             CDataStream ssTx(SER_NETWORK,
                              PROTOCOL_VERSION | RPCSerializationFlags());
             ssTx << tx;
 
             std::string binaryTx = ssTx.str();
             req->WriteHeader("Content-Type", "application/octet-stream");
             req->WriteReply(HTTP_OK, binaryTx);
             return true;
         }
 
         case RetFormat::HEX: {
             CDataStream ssTx(SER_NETWORK,
                              PROTOCOL_VERSION | RPCSerializationFlags());
             ssTx << tx;
 
             std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n";
             req->WriteHeader("Content-Type", "text/plain");
             req->WriteReply(HTTP_OK, strHex);
             return true;
         }
 
         case RetFormat::JSON: {
             UniValue objTx(UniValue::VOBJ);
             TxToUniv(*tx, hashBlock, objTx);
             std::string strJSON = objTx.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
 
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 }
 
 static bool rest_getutxos(Config &config, HTTPRequest *req,
                           const std::string &strURIPart) {
     if (!CheckWarmup(req)) {
         return false;
     }
 
     std::string param;
     const RetFormat rf = ParseDataFormat(param, strURIPart);
 
     std::vector<std::string> uriParts;
     if (param.length() > 1) {
         std::string strUriParams = param.substr(1);
         boost::split(uriParts, strUriParams, boost::is_any_of("/"));
     }
 
     // return an error in case of an empty request
     std::string strRequestMutable = req->ReadBody();
     if (strRequestMutable.length() == 0 && uriParts.size() == 0) {
         return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request");
     }
 
     bool fInputParsed = false;
     bool fCheckMemPool = false;
     std::vector<COutPoint> vOutPoints;
 
     // Parse/deserialize the input. The input format matches the output
     // format: e.g. /rest/getutxos/bin expects binary input and produces
     // binary output, and so on.
 
     if (uriParts.size() > 0) {
         // input is sent over the URI scheme
         // (/rest/getutxos/checkmempool/txid1-n/txid2-n/...)
         if (uriParts[0] == "checkmempool") {
             fCheckMemPool = true;
         }
 
         for (size_t i = (fCheckMemPool) ? 1 : 0; i < uriParts.size(); i++) {
             int32_t nOutput;
             std::string strTxid = uriParts[i].substr(0, uriParts[i].find('-'));
             std::string strOutput =
                 uriParts[i].substr(uriParts[i].find('-') + 1);
 
             if (!ParseInt32(strOutput, &nOutput) || !IsHex(strTxid)) {
                 return RESTERR(req, HTTP_BAD_REQUEST, "Parse error");
             }
 
             TxId txid;
             txid.SetHex(strTxid);
             vOutPoints.push_back(COutPoint(txid, uint32_t(nOutput)));
         }
 
         if (vOutPoints.size() > 0) {
             fInputParsed = true;
         } else {
             return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request");
         }
     }
 
     switch (rf) {
         case RetFormat::HEX: {
             // convert hex to binary, then fall through to the binary branch
             std::vector<uint8_t> strRequestV = ParseHex(strRequestMutable);
             strRequestMutable.assign(strRequestV.begin(), strRequestV.end());
         }
         // FALLTHROUGH
         case RetFormat::BINARY: {
             try {
                 // deserialize only if user sent a request
                 if (strRequestMutable.size() > 0) {
                     // don't allow sending input over URI and HTTP RAW DATA
                     if (fInputParsed) {
                         return RESTERR(req, HTTP_BAD_REQUEST,
                                        "Combination of URI scheme inputs and "
                                        "raw post data is not allowed");
                     }
 
                     CDataStream oss(SER_NETWORK, PROTOCOL_VERSION);
                     oss << strRequestMutable;
                     oss >> fCheckMemPool;
                     oss >> vOutPoints;
                 }
             } catch (const std::ios_base::failure &) {
                 // abort in case of unreadable binary data
                 return RESTERR(req, HTTP_BAD_REQUEST, "Parse error");
             }
             break;
         }
 
         case RetFormat::JSON: {
             if (!fInputParsed) {
                 return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request");
             }
             break;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 
     // limit max outpoints
     if (vOutPoints.size() > MAX_GETUTXOS_OUTPOINTS) {
         return RESTERR(
             req, HTTP_BAD_REQUEST,
             strprintf("Error: max outpoints exceeded (max: %d, tried: %d)",
                       MAX_GETUTXOS_OUTPOINTS, vOutPoints.size()));
     }
 
     // check spentness and form a bitmap (as well as a JSON-capable,
     // human-readable string representation)
     std::vector<uint8_t> bitmap;
     std::vector<CCoin> outs;
     std::string bitmapStringRepresentation;
     std::vector<bool> hits;
     bitmap.resize((vOutPoints.size() + 7) / 8);
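     // Bit i of bitmap[i / 8] marks whether outpoint i is an unspent coin;
     // see the loop over 'hits' below.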
     {
         auto process_utxos = [&vOutPoints, &outs,
                               &hits](const CCoinsView &view,
                                      const CTxMemPool &mempool) {
             for (const COutPoint &vOutPoint : vOutPoints) {
                 Coin coin;
                 bool hit = !mempool.isSpent(vOutPoint) &&
                            view.GetCoin(vOutPoint, coin);
                 hits.push_back(hit);
                 if (hit) {
                     outs.emplace_back(std::move(coin));
                 }
             }
         };
 
         if (fCheckMemPool) {
             // use db+mempool as the cache backend in case the user wants to
             // query the mempool
             LOCK2(cs_main, g_mempool.cs);
             CCoinsViewCache &viewChain = *pcoinsTip;
             CCoinsViewMemPool viewMempool(&viewChain, g_mempool);
             process_utxos(viewMempool, g_mempool);
         } else {
             // no need to lock mempool!
             LOCK(cs_main);
             process_utxos(*pcoinsTip, CTxMemPool());
         }
 
         for (size_t i = 0; i < hits.size(); ++i) {
             const bool hit = hits[i];
             // form a binary string representation (human-readable for json
             // output)
             bitmapStringRepresentation.append(hit ? "1" : "0");
             bitmap[i / 8] |= ((uint8_t)hit) << (i % 8);
         }
     }
 
     switch (rf) {
         case RetFormat::BINARY: {
             // serialize data
             // use the exact same output format as described in BIP64
             CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
             ssGetUTXOResponse << ::ChainActive().Height()
                               << ::ChainActive().Tip()->GetBlockHash() << bitmap
                               << outs;
             std::string ssGetUTXOResponseString = ssGetUTXOResponse.str();
 
             req->WriteHeader("Content-Type", "application/octet-stream");
             req->WriteReply(HTTP_OK, ssGetUTXOResponseString);
             return true;
         }
 
         case RetFormat::HEX: {
             CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION);
             ssGetUTXOResponse << ::ChainActive().Height()
                               << ::ChainActive().Tip()->GetBlockHash() << bitmap
                               << outs;
             std::string strHex =
                 HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) +
                 "\n";
 
             req->WriteHeader("Content-Type", "text/plain");
             req->WriteReply(HTTP_OK, strHex);
             return true;
         }
 
         case RetFormat::JSON: {
             UniValue objGetUTXOResponse(UniValue::VOBJ);
 
             // pack in some essentials
             // use more or less the same output format as described in BIP64
             objGetUTXOResponse.pushKV("chainHeight", ::ChainActive().Height());
             objGetUTXOResponse.pushKV(
                 "chaintipHash", ::ChainActive().Tip()->GetBlockHash().GetHex());
             objGetUTXOResponse.pushKV("bitmap", bitmapStringRepresentation);
 
             UniValue utxos(UniValue::VARR);
             for (const CCoin &coin : outs) {
                 UniValue utxo(UniValue::VOBJ);
                 utxo.pushKV("height", int32_t(coin.nHeight));
                 utxo.pushKV("value", ValueFromAmount(coin.out.nValue));
 
                 // include the script in a json output
                 UniValue o(UniValue::VOBJ);
                 ScriptPubKeyToUniv(coin.out.scriptPubKey, o, true);
                 utxo.pushKV("scriptPubKey", o);
                 utxos.push_back(utxo);
             }
             objGetUTXOResponse.pushKV("utxos", utxos);
 
             // return json string
             std::string strJSON = objGetUTXOResponse.write() + "\n";
             req->WriteHeader("Content-Type", "application/json");
             req->WriteReply(HTTP_OK, strJSON);
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 }
 
 static bool rest_blockhash_by_height(Config &config, HTTPRequest *req,
                                      const std::string &str_uri_part) {
     if (!CheckWarmup(req)) {
         return false;
     }
     std::string height_str;
     const RetFormat rf = ParseDataFormat(height_str, str_uri_part);
 
     int32_t blockheight;
     if (!ParseInt32(height_str, &blockheight) || blockheight < 0) {
         return RESTERR(req, HTTP_BAD_REQUEST,
                        "Invalid height: " + SanitizeString(height_str));
     }
 
     CBlockIndex *pblockindex = nullptr;
     {
         LOCK(cs_main);
         if (blockheight > ::ChainActive().Height()) {
             return RESTERR(req, HTTP_NOT_FOUND, "Block height out of range");
         }
         pblockindex = ::ChainActive()[blockheight];
     }
     switch (rf) {
         case RetFormat::BINARY: {
             CDataStream ss_blockhash(SER_NETWORK, PROTOCOL_VERSION);
             ss_blockhash << pblockindex->GetBlockHash();
             req->WriteHeader("Content-Type", "application/octet-stream");
             req->WriteReply(HTTP_OK, ss_blockhash.str());
             return true;
         }
         case RetFormat::HEX: {
             req->WriteHeader("Content-Type", "text/plain");
             req->WriteReply(HTTP_OK,
                             pblockindex->GetBlockHash().GetHex() + "\n");
             return true;
         }
         case RetFormat::JSON: {
             req->WriteHeader("Content-Type", "application/json");
             UniValue resp = UniValue(UniValue::VOBJ);
             resp.pushKV("blockhash", pblockindex->GetBlockHash().GetHex());
             req->WriteReply(HTTP_OK, resp.write() + "\n");
             return true;
         }
         default: {
             return RESTERR(req, HTTP_NOT_FOUND,
                            "output format not found (available: " +
                                AvailableDataFormatsString() + ")");
         }
     }
 }
 
 static const struct {
     const char *prefix;
     bool (*handler)(Config &config, HTTPRequest *req,
                     const std::string &strReq);
 } uri_prefixes[] = {
     {"/rest/tx/", rest_tx},
     {"/rest/block/notxdetails/", rest_block_notxdetails},
     {"/rest/block/", rest_block_extended},
     {"/rest/chaininfo", rest_chaininfo},
     {"/rest/mempool/info", rest_mempool_info},
     {"/rest/mempool/contents", rest_mempool_contents},
     {"/rest/headers/", rest_headers},
     {"/rest/getutxos", rest_getutxos},
     {"/rest/blockhashbyheight/", rest_blockhash_by_height},
 };
 
 void StartREST() {
     for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) {
         RegisterHTTPHandler(uri_prefixes[i].prefix, false,
                             uri_prefixes[i].handler);
     }
 }
 
 void InterruptREST() {}
 
 void StopREST() {
     for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) {
         UnregisterHTTPHandler(uri_prefixes[i].prefix, false);
     }
 }
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 7ef7858c4..d1fd46d71 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1,2707 +1,2708 @@
 // Copyright (c) 2010 Satoshi Nakamoto
 // Copyright (c) 2009-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <rpc/blockchain.h>
 
 #include <amount.h>
+#include <blockdb.h>
 #include <blockfilter.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <checkpoints.h>
 #include <coins.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <core_io.h>
 #include <hash.h>
 #include <index/blockfilterindex.h>
 #include <node/coinstats.h>
 #include <node/context.h>
 #include <policy/policy.h>
 #include <primitives/transaction.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <script/descriptor.h>
 #include <streams.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <undo.h>
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/validation.h>
 #include <validation.h>
 #include <validationinterface.h>
 #include <versionbitsinfo.h> // For VersionBitsDeploymentInfo
 #include <warnings.h>
 
 #include <boost/thread/thread.hpp> // boost::thread::interrupt
 
 #include <condition_variable>
 #include <cstdint>
 #include <memory>
 #include <mutex>
 
 struct CUpdatedBlock {
     uint256 hash;
     int height;
 };
 
 static Mutex cs_blockchange;
 static std::condition_variable cond_blockchange;
 static CUpdatedBlock latestblock GUARDED_BY(cs_blockchange);
 
 CTxMemPool &EnsureMemPool() {
     CHECK_NONFATAL(g_rpc_node);
     if (!g_rpc_node->mempool) {
         throw JSONRPCError(RPC_CLIENT_MEMPOOL_DISABLED,
                            "Mempool disabled or instance not found");
     }
     return *g_rpc_node->mempool;
 }
 
 /**
  * Calculate the difficulty for a given block index.
  */
 double GetDifficulty(const CBlockIndex *blockindex) {
     CHECK_NONFATAL(blockindex);
 
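     // Difficulty is the ratio between the maximum (difficulty-1) target,
     // 0x1d00ffff in compact form, and the target encoded in this block's
     // nBits.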
     int nShift = (blockindex->nBits >> 24) & 0xff;
     double dDiff = double(0x0000ffff) / double(blockindex->nBits & 0x00ffffff);
 
     while (nShift < 29) {
         dDiff *= 256.0;
         nShift++;
     }
     while (nShift > 29) {
         dDiff /= 256.0;
         nShift--;
     }
 
     return dDiff;
 }
 
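 /**
  * Compute the number of confirmations of 'blockindex' relative to 'tip' and
  * set 'next' to its successor on the path towards tip. Returns -1 (and a
  * null 'next') when 'blockindex' is not part of the chain ending at 'tip'.
  */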
 static int ComputeNextBlockAndDepth(const CBlockIndex *tip,
                                     const CBlockIndex *blockindex,
                                     const CBlockIndex *&next) {
     next = tip->GetAncestor(blockindex->nHeight + 1);
     if (next && next->pprev == blockindex) {
         return tip->nHeight - blockindex->nHeight + 1;
     }
     next = nullptr;
     return blockindex == tip ? 1 : -1;
 }
 
 UniValue blockheaderToJSON(const CBlockIndex *tip,
                            const CBlockIndex *blockindex) {
     // Serialize the passed information without accessing the state of the
     // active chain, for performance reasons.
     AssertLockNotHeld(cs_main);
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("hash", blockindex->GetBlockHash().GetHex());
     const CBlockIndex *pnext;
     int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext);
     result.pushKV("confirmations", confirmations);
     result.pushKV("height", blockindex->nHeight);
     result.pushKV("version", blockindex->nVersion);
     result.pushKV("versionHex", strprintf("%08x", blockindex->nVersion));
     result.pushKV("merkleroot", blockindex->hashMerkleRoot.GetHex());
     result.pushKV("time", int64_t(blockindex->nTime));
     result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast()));
     result.pushKV("nonce", uint64_t(blockindex->nNonce));
     result.pushKV("bits", strprintf("%08x", blockindex->nBits));
     result.pushKV("difficulty", GetDifficulty(blockindex));
     result.pushKV("chainwork", blockindex->nChainWork.GetHex());
     result.pushKV("nTx", uint64_t(blockindex->nTx));
 
     if (blockindex->pprev) {
         result.pushKV("previousblockhash",
                       blockindex->pprev->GetBlockHash().GetHex());
     }
     if (pnext) {
         result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex());
     }
     return result;
 }
 
 UniValue blockToJSON(const CBlock &block, const CBlockIndex *tip,
                      const CBlockIndex *blockindex, bool txDetails) {
     // Serialize the passed information without accessing the state of the
     // active chain, for performance reasons.
     AssertLockNotHeld(cs_main);
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("hash", blockindex->GetBlockHash().GetHex());
     const CBlockIndex *pnext;
     int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext);
     result.pushKV("confirmations", confirmations);
     result.pushKV("size", (int)::GetSerializeSize(block, PROTOCOL_VERSION));
     result.pushKV("height", blockindex->nHeight);
     result.pushKV("version", block.nVersion);
     result.pushKV("versionHex", strprintf("%08x", block.nVersion));
     result.pushKV("merkleroot", block.hashMerkleRoot.GetHex());
     UniValue txs(UniValue::VARR);
     for (const auto &tx : block.vtx) {
         if (txDetails) {
             UniValue objTx(UniValue::VOBJ);
             TxToUniv(*tx, uint256(), objTx, true, RPCSerializationFlags());
             txs.push_back(objTx);
         } else {
             txs.push_back(tx->GetId().GetHex());
         }
     }
     result.pushKV("tx", txs);
     result.pushKV("time", block.GetBlockTime());
     result.pushKV("mediantime", int64_t(blockindex->GetMedianTimePast()));
     result.pushKV("nonce", uint64_t(block.nNonce));
     result.pushKV("bits", strprintf("%08x", block.nBits));
     result.pushKV("difficulty", GetDifficulty(blockindex));
     result.pushKV("chainwork", blockindex->nChainWork.GetHex());
     result.pushKV("nTx", uint64_t(blockindex->nTx));
 
     if (blockindex->pprev) {
         result.pushKV("previousblockhash",
                       blockindex->pprev->GetBlockHash().GetHex());
     }
     if (pnext) {
         result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex());
     }
     return result;
 }
 
 static UniValue getblockcount(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockcount",
         "\nReturns the height of the most-work fully-validated chain.\n"
         "The genesis block has height 0.\n",
         {},
         RPCResult{"n    (numeric) The current block count\n"},
         RPCExamples{HelpExampleCli("getblockcount", "") +
                     HelpExampleRpc("getblockcount", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
     return ::ChainActive().Height();
 }
 
 static UniValue getbestblockhash(const Config &config,
                                  const JSONRPCRequest &request) {
     RPCHelpMan{
         "getbestblockhash",
         "Returns the hash of the best (tip) block in the "
         "most-work fully-validated chain.\n",
         {},
         RPCResult{"\"hex\"      (string) the block hash, hex-encoded\n"},
         RPCExamples{HelpExampleCli("getbestblockhash", "") +
                     HelpExampleRpc("getbestblockhash", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
     return ::ChainActive().Tip()->GetBlockHash().GetHex();
 }
 
 UniValue getfinalizedblockhash(const Config &config,
                                const JSONRPCRequest &request) {
     RPCHelpMan{
         "getfinalizedblockhash",
         "Returns the hash of the currently finalized block\n",
         {},
         RPCResult{"\"hex\"      (string) the block hash hex-encoded\n"},
         RPCExamples{HelpExampleCli("getfinalizedblockhash", "") +
                     HelpExampleRpc("getfinalizedblockhash", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
     const CBlockIndex *blockIndexFinalized =
         ::ChainstateActive().GetFinalizedBlock();
     if (blockIndexFinalized) {
         return blockIndexFinalized->GetBlockHash().GetHex();
     }
     return UniValue(UniValue::VSTR);
 }
 
 void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex) {
     if (pindex) {
         LOCK(cs_blockchange);
         latestblock.hash = pindex->GetBlockHash();
         latestblock.height = pindex->nHeight;
     }
     cond_blockchange.notify_all();
 }
 
 static UniValue waitfornewblock(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "waitfornewblock",
         "Waits for a specific new block and returns useful info about it.\n"
         "\nReturns the current block on timeout or exit.\n",
         {
             {"timeout", RPCArg::Type::NUM, /* default */ "0",
              "Time in milliseconds to wait for a response. 0 indicates no "
              "timeout."},
         },
         RPCResult{"{                           (json object)\n"
                   "  \"hash\" : {       (string) The blockhash\n"
                   "  \"height\" : {     (int) Block height\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("waitfornewblock", "1000") +
                     HelpExampleRpc("waitfornewblock", "1000")},
     }
         .Check(request);
 
     int timeout = 0;
     if (!request.params[0].isNull()) {
         timeout = request.params[0].get_int();
     }
 
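     // Block until the tip recorded by RPCNotifyBlockChange changes, the
     // timeout expires, or the RPC server shuts down, then report the
     // latest tip seen.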
     CUpdatedBlock block;
     {
         WAIT_LOCK(cs_blockchange, lock);
         block = latestblock;
         if (timeout) {
             cond_blockchange.wait_for(
                 lock, std::chrono::milliseconds(timeout),
                 [&block]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.height != block.height ||
                            latestblock.hash != block.hash || !IsRPCRunning();
                 });
         } else {
             cond_blockchange.wait(
                 lock, [&block]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.height != block.height ||
                            latestblock.hash != block.hash || !IsRPCRunning();
                 });
         }
         block = latestblock;
     }
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("hash", block.hash.GetHex());
     ret.pushKV("height", block.height);
     return ret;
 }
 
 static UniValue waitforblock(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "waitforblock",
         "Waits for a specific new block and returns useful info about it.\n"
         "\nReturns the current block on timeout or exit.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "Block hash to wait for."},
             {"timeout", RPCArg::Type::NUM, /* default */ "0",
              "Time in milliseconds to wait for a response. 0 indicates no "
              "timeout."},
         },
         RPCResult{"{                           (json object)\n"
                   "  \"hash\" : {       (string) The blockhash\n"
                   "  \"height\" : {     (int) Block height\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("waitforblock",
                                    "\"0000000000079f8ef3d2c688c244eb7a4570b24c9"
                                    "ed7b4a8c619eb02596f8862\", 1000") +
                     HelpExampleRpc("waitforblock",
                                    "\"0000000000079f8ef3d2c688c244eb7a4570b24c9"
                                    "ed7b4a8c619eb02596f8862\", 1000")},
     }
         .Check(request);
 
     int timeout = 0;
 
     BlockHash hash(ParseHashV(request.params[0], "blockhash"));
 
     if (!request.params[1].isNull()) {
         timeout = request.params[1].get_int();
     }
 
     CUpdatedBlock block;
     {
         WAIT_LOCK(cs_blockchange, lock);
         if (timeout) {
             cond_blockchange.wait_for(
                 lock, std::chrono::milliseconds(timeout),
                 [&hash]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.hash == hash || !IsRPCRunning();
                 });
         } else {
             cond_blockchange.wait(
                 lock, [&hash]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.hash == hash || !IsRPCRunning();
                 });
         }
         block = latestblock;
     }
 
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("hash", block.hash.GetHex());
     ret.pushKV("height", block.height);
     return ret;
 }
 
 static UniValue waitforblockheight(const Config &config,
                                    const JSONRPCRequest &request) {
     RPCHelpMan{
         "waitforblockheight",
         "Waits for (at least) block height and returns the height and "
         "hash\nof the current tip.\n"
         "\nReturns the current block on timeout or exit.\n",
         {
             {"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
              "Block height to wait for."},
             {"timeout", RPCArg::Type::NUM, /* default */ "0",
              "Time in milliseconds to wait for a response. 0 indicates no "
              "timeout."},
         },
         RPCResult{"{                           (json object)\n"
                   "  \"hash\" : {       (string) The blockhash\n"
                   "  \"height\" : {     (int) Block height\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("waitforblockheight", "\"100\", 1000") +
                     HelpExampleRpc("waitforblockheight", "\"100\", 1000")},
     }
         .Check(request);
 
     int timeout = 0;
 
     int height = request.params[0].get_int();
 
     if (!request.params[1].isNull()) {
         timeout = request.params[1].get_int();
     }
 
     CUpdatedBlock block;
     {
         WAIT_LOCK(cs_blockchange, lock);
         if (timeout) {
             cond_blockchange.wait_for(
                 lock, std::chrono::milliseconds(timeout),
                 [&height]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.height >= height || !IsRPCRunning();
                 });
         } else {
             cond_blockchange.wait(
                 lock, [&height]() EXCLUSIVE_LOCKS_REQUIRED(cs_blockchange) {
                     return latestblock.height >= height || !IsRPCRunning();
                 });
         }
         block = latestblock;
     }
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("hash", block.hash.GetHex());
     ret.pushKV("height", block.height);
     return ret;
 }
 
 static UniValue
 syncwithvalidationinterfacequeue(const Config &config,
                                  const JSONRPCRequest &request) {
     RPCHelpMan{
         "syncwithvalidationinterfacequeue",
         "Waits for the validation interface queue to catch up on everything "
         "that was there when we entered this function.\n",
         {},
         RPCResults{},
         RPCExamples{HelpExampleCli("syncwithvalidationinterfacequeue", "") +
                     HelpExampleRpc("syncwithvalidationinterfacequeue", "")},
     }
         .Check(request);
 
     SyncWithValidationInterfaceQueue();
     return NullUniValue;
 }
 
 static UniValue getdifficulty(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "getdifficulty",
         "Returns the proof-of-work difficulty as a multiple of the minimum "
         "difficulty.\n",
         {},
         RPCResult{"n.nnn       (numeric) the proof-of-work difficulty as a "
                   "multiple of the minimum difficulty.\n"},
         RPCExamples{HelpExampleCli("getdifficulty", "") +
                     HelpExampleRpc("getdifficulty", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
     return GetDifficulty(::ChainActive().Tip());
 }
 
 static std::string EntryDescriptionString() {
     return "    \"size\" : n,             (numeric) transaction size.\n"
            "    \"fee\" : n,              (numeric) transaction fee in " +
            CURRENCY_UNIT + "(DEPRECATED)" +
            "\n"
            "    \"modifiedfee\" : n,      (numeric) transaction fee with fee "
            "deltas used for mining priority (DEPRECATED)\n"
            "    \"time\" : n,             (numeric) local time transaction "
            "entered pool in seconds since 1 Jan 1970 GMT\n"
            "    \"height\" : n,           (numeric) block height when "
            "transaction entered pool\n"
            "    \"descendantcount\" : n,  (numeric) number of in-mempool "
            "descendant transactions (including this one)\n"
            "    \"descendantsize\" : n,   (numeric) transaction size "
            "of in-mempool descendants (including this one)\n"
            "    \"descendantfees\" : n,   (numeric) modified fees (see above) "
            "of in-mempool descendants (including this one) (DEPRECATED)\n"
            "    \"ancestorcount\" : n,    (numeric) number of in-mempool "
            "ancestor transactions (including this one)\n"
            "    \"ancestorsize\" : n,     (numeric) transaction size "
            "of in-mempool ancestors (including this one)\n"
            "    \"ancestorfees\" : n,     (numeric) modified fees (see above) "
            "of in-mempool ancestors (including this one) (DEPRECATED)\n"
            "    \"fees\" : {\n"
            "        \"base\" : n,         (numeric) transaction fee in " +
            CURRENCY_UNIT +
            "\n"
            "        \"modified\" : n,     (numeric) transaction fee with fee "
            "deltas used for mining priority in " +
            CURRENCY_UNIT +
            "\n"
            "        \"ancestor\" : n,     (numeric) modified fees (see above) "
            "of in-mempool ancestors (including this one) in " +
            CURRENCY_UNIT +
            "\n"
            "        \"descendant\" : n,   (numeric) modified fees (see above) "
            "of in-mempool descendants (including this one) in " +
            CURRENCY_UNIT +
            "\n"
            "    }\n"
            "    \"depends\" : [           (array) unconfirmed transactions "
            "used as inputs for this transaction\n"
            "        \"transactionid\",    (string) parent transaction id\n"
            "       ... ]\n"
            "    \"spentby\" : [           (array) unconfirmed transactions "
            "spending outputs from this transaction\n"
            "        \"transactionid\",    (string) child transaction id\n"
            "       ... ]\n";
 }
 
 static void entryToJSON(const CTxMemPool &pool, UniValue &info,
                         const CTxMemPoolEntry &e)
     EXCLUSIVE_LOCKS_REQUIRED(pool.cs) {
     AssertLockHeld(pool.cs);
 
     UniValue fees(UniValue::VOBJ);
     fees.pushKV("base", ValueFromAmount(e.GetFee()));
     fees.pushKV("modified", ValueFromAmount(e.GetModifiedFee()));
     fees.pushKV("ancestor", ValueFromAmount(e.GetModFeesWithAncestors()));
     fees.pushKV("descendant", ValueFromAmount(e.GetModFeesWithDescendants()));
     info.pushKV("fees", fees);
 
     info.pushKV("size", (int)e.GetTxSize());
     info.pushKV("fee", ValueFromAmount(e.GetFee()));
     info.pushKV("modifiedfee", ValueFromAmount(e.GetModifiedFee()));
     info.pushKV("time", count_seconds(e.GetTime()));
     info.pushKV("height", (int)e.GetHeight());
     info.pushKV("descendantcount", e.GetCountWithDescendants());
     info.pushKV("descendantsize", e.GetSizeWithDescendants());
     info.pushKV("descendantfees", e.GetModFeesWithDescendants() / SATOSHI);
     info.pushKV("ancestorcount", e.GetCountWithAncestors());
     info.pushKV("ancestorsize", e.GetSizeWithAncestors());
     info.pushKV("ancestorfees", e.GetModFeesWithAncestors() / SATOSHI);
     const CTransaction &tx = e.GetTx();
     std::set<std::string> setDepends;
     for (const CTxIn &txin : tx.vin) {
         if (pool.exists(txin.prevout.GetTxId())) {
             setDepends.insert(txin.prevout.GetTxId().ToString());
         }
     }
 
     UniValue depends(UniValue::VARR);
     for (const std::string &dep : setDepends) {
         depends.push_back(dep);
     }
 
     info.pushKV("depends", depends);
 
     UniValue spent(UniValue::VARR);
     const CTxMemPool::txiter &it = pool.mapTx.find(tx.GetId());
     const CTxMemPool::setEntries &setChildren = pool.GetMemPoolChildren(it);
     for (CTxMemPool::txiter childiter : setChildren) {
         spent.push_back(childiter->GetTx().GetId().ToString());
     }
 
     info.pushKV("spentby", spent);
 }
 
 UniValue MempoolToJSON(const CTxMemPool &pool, bool verbose) {
     if (verbose) {
         LOCK(pool.cs);
         UniValue o(UniValue::VOBJ);
         for (const CTxMemPoolEntry &e : pool.mapTx) {
             const uint256 &txid = e.GetTx().GetId();
             UniValue info(UniValue::VOBJ);
             entryToJSON(pool, info, e);
             // Mempool has unique entries so there is no advantage in using
             // UniValue::pushKV, which checks if the key already exists in O(N).
             // UniValue::__pushKV is used instead which currently is O(1).
             o.__pushKV(txid.ToString(), info);
         }
         return o;
     } else {
         std::vector<uint256> vtxids;
         pool.queryHashes(vtxids);
 
         UniValue a(UniValue::VARR);
         for (const uint256 &txid : vtxids) {
             a.push_back(txid.ToString());
         }
 
         return a;
     }
 }
 
 static UniValue getrawmempool(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "getrawmempool",
         "Returns all transaction ids in memory pool as a json array of "
         "string transaction ids.\n"
         "\nHint: use getmempoolentry to fetch a specific transaction from the "
         "mempool.\n",
         {
             {"verbose", RPCArg::Type::BOOL, /* default */ "false",
              "True for a json object, false for array of transaction ids"},
         },
         RPCResult{"for verbose = false",
                   "[                     (json array of string)\n"
                   "  \"transactionid\"     (string) The transaction id\n"
                   "  ,...\n"
                   "]\n"
                   "\nResult: (for verbose = true):\n"
                   "{                           (json object)\n"
                   "  \"transactionid\" : {       (json object)\n" +
                       EntryDescriptionString() +
                       "  }, ...\n"
                       "}\n"},
         RPCExamples{HelpExampleCli("getrawmempool", "true") +
                     HelpExampleRpc("getrawmempool", "true")},
     }
         .Check(request);
 
     bool fVerbose = false;
     if (!request.params[0].isNull()) {
         fVerbose = request.params[0].get_bool();
     }
 
     return MempoolToJSON(::g_mempool, fVerbose);
 }
 
 static UniValue getmempoolancestors(const Config &config,
                                     const JSONRPCRequest &request) {
     RPCHelpMan{
         "getmempoolancestors",
         "If txid is in the mempool, returns all in-mempool ancestors.\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id (must be in mempool)"},
             {"verbose", RPCArg::Type::BOOL, /* default */ "false",
              "True for a json object, false for array of transaction ids"},
         },
         {
             RPCResult{"for verbose = false",
                       "[                       (json array of strings)\n"
                       "  \"transactionid\"           (string) The transaction "
                       "id of an in-mempool ancestor transaction\n"
                       "  ,...\n"
                       "]\n"},
             RPCResult{"for verbose = true",
                       "{                           (json object)\n"
                       "  \"transactionid\" : {       (json object)\n" +
                           EntryDescriptionString() +
                           "  }, ...\n"
                           "}\n"},
         },
         RPCExamples{HelpExampleCli("getmempoolancestors", "\"mytxid\"") +
                     HelpExampleRpc("getmempoolancestors", "\"mytxid\"")},
     }
         .Check(request);
 
     bool fVerbose = false;
     if (!request.params[1].isNull()) {
         fVerbose = request.params[1].get_bool();
     }
 
     TxId txid(ParseHashV(request.params[0], "parameter 1"));
 
     LOCK(g_mempool.cs);
 
     CTxMemPool::txiter it = g_mempool.mapTx.find(txid);
     if (it == g_mempool.mapTx.end()) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Transaction not in mempool");
     }
 
     CTxMemPool::setEntries setAncestors;
     uint64_t noLimit = std::numeric_limits<uint64_t>::max();
     std::string dummy;
     g_mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit,
                                         noLimit, noLimit, dummy, false);
 
     if (!fVerbose) {
         UniValue o(UniValue::VARR);
         for (CTxMemPool::txiter ancestorIt : setAncestors) {
             o.push_back(ancestorIt->GetTx().GetId().ToString());
         }
 
         return o;
     } else {
         UniValue o(UniValue::VOBJ);
         for (CTxMemPool::txiter ancestorIt : setAncestors) {
             const CTxMemPoolEntry &e = *ancestorIt;
             const TxId &_txid = e.GetTx().GetId();
             UniValue info(UniValue::VOBJ);
             entryToJSON(::g_mempool, info, e);
             o.pushKV(_txid.ToString(), info);
         }
         return o;
     }
 }
 
 static UniValue getmempooldescendants(const Config &config,
                                       const JSONRPCRequest &request) {
     RPCHelpMan{
         "getmempooldescendants",
         "If txid is in the mempool, returns all in-mempool descendants.\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id (must be in mempool)"},
             {"verbose", RPCArg::Type::BOOL, /* default */ "false",
              "True for a json object, false for array of transaction ids"},
         },
         {
             RPCResult{"for verbose = false",
                       "[                       (json array of strings)\n"
                       "  \"transactionid\"           (string) The transaction "
                       "id of an in-mempool descendant transaction\n"
                       "  ,...\n"
                       "]\n"},
             RPCResult{"for verbose = true",
                       "{                           (json object)\n"
                       "  \"transactionid\" : {       (json object)\n" +
                           EntryDescriptionString() +
                           "  }, ...\n"
                           "}\n"},
         },
         RPCExamples{HelpExampleCli("getmempooldescendants", "\"mytxid\"") +
                     HelpExampleRpc("getmempooldescendants", "\"mytxid\"")},
     }
         .Check(request);
 
     bool fVerbose = false;
     if (!request.params[1].isNull()) {
         fVerbose = request.params[1].get_bool();
     }
 
     TxId txid(ParseHashV(request.params[0], "parameter 1"));
 
     LOCK(g_mempool.cs);
 
     CTxMemPool::txiter it = g_mempool.mapTx.find(txid);
     if (it == g_mempool.mapTx.end()) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Transaction not in mempool");
     }
 
     CTxMemPool::setEntries setDescendants;
     g_mempool.CalculateDescendants(it, setDescendants);
     // CTxMemPool::CalculateDescendants will include the given tx
     setDescendants.erase(it);
 
     if (!fVerbose) {
         UniValue o(UniValue::VARR);
         for (CTxMemPool::txiter descendantIt : setDescendants) {
             o.push_back(descendantIt->GetTx().GetId().ToString());
         }
 
         return o;
     } else {
         UniValue o(UniValue::VOBJ);
         for (CTxMemPool::txiter descendantIt : setDescendants) {
             const CTxMemPoolEntry &e = *descendantIt;
             const TxId &_txid = e.GetTx().GetId();
             UniValue info(UniValue::VOBJ);
             entryToJSON(::g_mempool, info, e);
             o.pushKV(_txid.ToString(), info);
         }
         return o;
     }
 }
 
 static UniValue getmempoolentry(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "getmempoolentry",
         "Returns mempool data for given transaction\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id (must be in mempool)"},
         },
         RPCResult{"{                           (json object)\n" +
                   EntryDescriptionString() + "}\n"},
         RPCExamples{HelpExampleCli("getmempoolentry", "\"mytxid\"") +
                     HelpExampleRpc("getmempoolentry", "\"mytxid\"")},
     }
         .Check(request);
 
     TxId txid(ParseHashV(request.params[0], "parameter 1"));
 
     LOCK(g_mempool.cs);
 
     CTxMemPool::txiter it = g_mempool.mapTx.find(txid);
     if (it == g_mempool.mapTx.end()) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Transaction not in mempool");
     }
 
     const CTxMemPoolEntry &e = *it;
     UniValue info(UniValue::VOBJ);
     entryToJSON(::g_mempool, info, e);
     return info;
 }
 
 static UniValue getblockhash(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockhash",
         "Returns hash of block in best-block-chain at height provided.\n",
         {
             {"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
              "The height index"},
         },
         RPCResult{"\"hash\"         (string) The block hash\n"},
         RPCExamples{HelpExampleCli("getblockhash", "1000") +
                     HelpExampleRpc("getblockhash", "1000")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     int nHeight = request.params[0].get_int();
     if (nHeight < 0 || nHeight > ::ChainActive().Height()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range");
     }
 
     CBlockIndex *pblockindex = ::ChainActive()[nHeight];
     return pblockindex->GetBlockHash().GetHex();
 }
 
 static UniValue getblockheader(const Config &config,
                                const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockheader",
         "If verbose is false, returns a string that is serialized, hex-encoded "
         "data for blockheader 'hash'.\n"
         "If verbose is true, returns an Object with information about "
         "blockheader <hash>.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The block hash"},
             {"verbose", RPCArg::Type::BOOL, /* default */ "true",
              "true for a json object, false for the hex-encoded data"},
         },
         {
             RPCResult{
                 "for verbose = true",
                 "{\n"
                 "  \"hash\" : \"hash\",     (string) the block hash (same as "
                 "provided)\n"
                 "  \"confirmations\" : n,   (numeric) The number of "
                 "confirmations, or -1 if the block is not on the main chain\n"
                 "  \"height\" : n,          (numeric) The block height or "
                 "index\n"
                 "  \"version\" : n,         (numeric) The block version\n"
                 "  \"versionHex\" : \"00000000\", (string) The block version "
                 "formatted in hexadecimal\n"
                 "  \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
                 "  \"time\" : ttt,          (numeric) The block time expressed "
                 "in " +
                     UNIX_EPOCH_TIME +
                     "\n"
                     "  \"mediantime\" : ttt,    (numeric) The median block "
                     "time expressed in " +
                     UNIX_EPOCH_TIME +
                     "\n"
                     "  \"nonce\" : n,           (numeric) The nonce\n"
                     "  \"bits\" : \"1d00ffff\", (string) The bits\n"
                     "  \"difficulty\" : x.xxx,  (numeric) The difficulty\n"
                     "  \"chainwork\" : \"0000...1f3\"     (string) Expected "
                     "number of hashes required to produce the current chain "
                     "(in hex)\n"
                     "  \"nTx\" : n,             (numeric) The number of "
                     "transactions in the block.\n"
                     "  \"previousblockhash\" : \"hash\",  (string) The hash of "
                     "the previous block\n"
                     "  \"nextblockhash\" : \"hash\",      (string) The hash of "
                     "the next block\n"
                     "}\n"},
             RPCResult{"for verbose=false",
                       "\"data\"             (string) A string that is "
                       "serialized, hex-encoded data for block 'hash'.\n"},
         },
         RPCExamples{HelpExampleCli("getblockheader",
                                    "\"00000000c937983704a73af28acdec37b049d214a"
                                    "dbda81d7e2a3dd146f6ed09\"") +
                     HelpExampleRpc("getblockheader",
                                    "\"00000000c937983704a73af28acdec37b049d214a"
                                    "dbda81d7e2a3dd146f6ed09\"")},
     }
         .Check(request);
 
     BlockHash hash(ParseHashV(request.params[0], "hash"));
 
     bool fVerbose = true;
     if (!request.params[1].isNull()) {
         fVerbose = request.params[1].get_bool();
     }
 
     const CBlockIndex *pblockindex;
     const CBlockIndex *tip;
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         tip = ::ChainActive().Tip();
     }
 
     if (!pblockindex) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
     }
 
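     // verbose=false: serialize the 80-byte block header and return it as hex.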
     if (!fVerbose) {
         CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
         ssBlock << pblockindex->GetBlockHeader();
         std::string strHex = HexStr(ssBlock.begin(), ssBlock.end());
         return strHex;
     }
 
     return blockheaderToJSON(tip, pblockindex);
 }
 
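 // Read a block from disk, throwing an RPC error if the block data has been
 // pruned or cannot be found.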
 static CBlock GetBlockChecked(const Config &config,
                               const CBlockIndex *pblockindex) {
     CBlock block;
     if (IsBlockPruned(pblockindex)) {
         throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
     }
 
     if (!ReadBlockFromDisk(block, pblockindex,
                            config.GetChainParams().GetConsensus())) {
         // Block not found on disk. This could be because we have the block
         // header in our index but don't have the block (for example if a
         // non-whitelisted node sends us an unrequested long chain of valid
         // blocks, we add the headers to our index, but don't accept the block).
         throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk");
     }
 
     return block;
 }
 
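 // Read a block's undo data (the coins spent by that block) from disk,
 // throwing an RPC error if the data has been pruned or cannot be read.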
 static CBlockUndo GetUndoChecked(const CBlockIndex *pblockindex) {
     CBlockUndo blockUndo;
     if (IsBlockPruned(pblockindex)) {
         throw JSONRPCError(RPC_MISC_ERROR,
                            "Undo data not available (pruned data)");
     }
 
     if (!UndoReadFromDisk(blockUndo, pblockindex)) {
         throw JSONRPCError(RPC_MISC_ERROR, "Can't read undo data from disk");
     }
 
     return blockUndo;
 }
 
 static UniValue getblock(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblock",
         "If verbosity is 0 or false, returns a string that is serialized, "
         "hex-encoded data for block 'hash'.\n"
         "If verbosity is 1 or true, returns an Object with information about "
         "block <hash>.\n"
         "If verbosity is 2, returns an Object with information about block "
         "<hash> and information about each transaction.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The block hash"},
             {"verbosity", RPCArg::Type::NUM, /* default */ "1",
              "0 for hex-encoded data, 1 for a json object, and 2 for json "
              "object with transaction data"},
         },
         {
             RPCResult{"for verbosity = 0",
                       "\"data\"                   (string) A string that is "
                       "serialized, hex-encoded data for block 'hash'.\n"},
             RPCResult{
                 "for verbosity = 1",
                 "{\n"
                 "  \"hash\" : \"hash\",       (string) The block hash (same as "
                 "provided)\n"
                 "  \"confirmations\" : n,   (numeric) The number of "
                 "confirmations, or -1 if the block is not on the main chain\n"
                 "  \"size\" : n,            (numeric) The block size\n"
                 "  \"height\" : n,          (numeric) The block height or "
                 "index\n"
                 "  \"version\" : n,         (numeric) The block version\n"
                 "  \"versionHex\" : \"00000000\", (string) The block version "
                 "formatted in hexadecimal\n"
                 "  \"merkleroot\" : \"xxxx\", (string) The merkle root\n"
                 "  \"tx\" : [               (array of string) The transaction "
                 "ids\n"
                 "     \"transactionid\"     (string) The transaction id\n"
                 "     ,...\n"
                 "  ],\n"
                 "  \"time\" : ttt,          (numeric) The block time expressed "
                 "in " +
                     UNIX_EPOCH_TIME +
                     "\n"
                     "  \"mediantime\" : ttt,    (numeric) The median block "
                     "time expressed in " +
                     UNIX_EPOCH_TIME +
                     "\n"
                     "  \"nonce\" : n,           (numeric) The nonce\n"
                     "  \"bits\" : \"1d00ffff\",   (string) The bits\n"
                     "  \"difficulty\" : x.xxx,  (numeric) The difficulty\n"
                     "  \"chainwork\" : \"xxxx\",  (string) Expected number of "
                     "hashes required to produce the chain up to this block (in "
                     "hex)\n"
                     "  \"nTx\" : n,             (numeric) The number of "
                     "transactions in the block.\n"
                     "  \"previousblockhash\" : \"hash\",  (string) The hash of "
                     "the previous block\n"
                     "  \"nextblockhash\" : \"hash\"       (string) The hash of "
                     "the next block\n"
                     "}\n"},
             RPCResult{"for verbosity = 2",
                       "{\n"
                       "  ...,                   Same output as verbosity = 1\n"
                       "  \"tx\" : [               (array of Objects) The "
                       "transactions in the format of the getrawtransaction "
                       "RPC; different from verbosity = 1 \"tx\" result\n"
                       "    ...\n"
                       "  ],\n"
                       "  ...                    Same output as verbosity = 1\n"
                       "}\n"},
         },
         RPCExamples{
             HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d"
                                        "214adbda81d7e2a3dd146f6ed09\"") +
             HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d"
                                        "214adbda81d7e2a3dd146f6ed09\"")},
     }
         .Check(request);
 
     BlockHash hash(ParseHashV(request.params[0], "blockhash"));
 
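     // Accept either a numeric verbosity (0, 1 or 2) or, for backwards
     // compatibility, a boolean (true maps to 1, false to 0).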
     int verbosity = 1;
     if (!request.params[1].isNull()) {
         if (request.params[1].isNum()) {
             verbosity = request.params[1].get_int();
         } else {
             verbosity = request.params[1].get_bool() ? 1 : 0;
         }
     }
 
     CBlock block;
     const CBlockIndex *pblockindex;
     const CBlockIndex *tip;
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         tip = ::ChainActive().Tip();
 
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
 
         block = GetBlockChecked(config, pblockindex);
     }
 
     if (verbosity <= 0) {
         CDataStream ssBlock(SER_NETWORK,
                             PROTOCOL_VERSION | RPCSerializationFlags());
         ssBlock << block;
         std::string strHex = HexStr(ssBlock.begin(), ssBlock.end());
         return strHex;
     }
 
     return blockToJSON(block, tip, pblockindex, verbosity >= 2);
 }
 
 static UniValue pruneblockchain(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "pruneblockchain",
         "",
         {
             {"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
              "The block height to prune up to. May be set to a discrete "
              "height, or to a " +
                  UNIX_EPOCH_TIME +
                  "\n"
                  "                  to prune blocks whose block time is at "
                  "least 2 hours older than the provided timestamp."},
         },
         RPCResult{"n    (numeric) Height of the last block pruned.\n"},
         RPCExamples{HelpExampleCli("pruneblockchain", "1000") +
                     HelpExampleRpc("pruneblockchain", "1000")},
     }
         .Check(request);
 
     if (!fPruneMode) {
         throw JSONRPCError(
             RPC_MISC_ERROR,
             "Cannot prune blocks because node is not in prune mode.");
     }
 
     LOCK(cs_main);
 
     int heightParam = request.params[0].get_int();
     if (heightParam < 0) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height.");
     }
 
     // Height value more than a billion is too high to be a block height, and
     // too low to be a block time (corresponds to timestamp from Sep 2001).
     if (heightParam > 1000000000) {
         // Add a 2 hour buffer to include blocks which might have had old
         // timestamps
         CBlockIndex *pindex = ::ChainActive().FindEarliestAtLeast(
             heightParam - TIMESTAMP_WINDOW, 0);
         if (!pindex) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 "Could not find block with at least the specified timestamp.");
         }
         heightParam = pindex->nHeight;
     }
 
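     // Refuse to prune a chain shorter than PruneAfterHeight, and clamp the
     // target so that at least MIN_BLOCKS_TO_KEEP recent blocks are retained.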
     unsigned int height = (unsigned int)heightParam;
     unsigned int chainHeight = (unsigned int)::ChainActive().Height();
     if (chainHeight < config.GetChainParams().PruneAfterHeight()) {
         throw JSONRPCError(RPC_MISC_ERROR,
                            "Blockchain is too short for pruning.");
     } else if (height > chainHeight) {
         throw JSONRPCError(
             RPC_INVALID_PARAMETER,
             "Blockchain is shorter than the attempted prune height.");
     } else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) {
         LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. "
                              "Retaining the minimum number of blocks.\n");
         height = chainHeight - MIN_BLOCKS_TO_KEEP;
     }
 
     PruneBlockFilesManual(height);
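     // Walk back from the tip and report the height of the lowest block whose
     // data is still present on disk.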
     const CBlockIndex *block = ::ChainActive().Tip();
     CHECK_NONFATAL(block);
     while (block->pprev && (block->pprev->nStatus.hasData())) {
         block = block->pprev;
     }
     return uint64_t(block->nHeight);
 }
 
 static UniValue gettxoutsetinfo(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "gettxoutsetinfo",
         "Returns statistics about the unspent transaction output set.\n"
         "Note this call may take some time.\n",
         {},
         RPCResult{
             "{\n"
             "  \"height\":n,     (numeric) The current block height (index)\n"
             "  \"bestblock\": \"hex\",   (string) the best block hash hex\n"
             "  \"transactions\": n,      (numeric) The number of transactions\n"
             "  \"txouts\": n,            (numeric) The number of output "
             "transactions\n"
             "  \"bogosize\": n,          (numeric) A database-independent "
             "metric for UTXO set size\n"
             "  \"hash_serialized\": \"hash\",   (string) The serialized hash\n"
             "  \"disk_size\": n,         (numeric) The estimated size of the "
             "chainstate on disk\n"
             "  \"total_amount\": x.xxx   (numeric) The total amount\n"
             "}\n"},
         RPCExamples{HelpExampleCli("gettxoutsetinfo", "") +
                     HelpExampleRpc("gettxoutsetinfo", "")},
     }
         .Check(request);
 
     UniValue ret(UniValue::VOBJ);
 
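     // Flush the chainstate first so the statistics reflect the current tip,
     // then scan the coins database.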
     CCoinsStats stats;
     ::ChainstateActive().ForceFlushStateToDisk();
     if (GetUTXOStats(pcoinsdbview.get(), stats)) {
         ret.pushKV("height", int64_t(stats.nHeight));
         ret.pushKV("bestblock", stats.hashBlock.GetHex());
         ret.pushKV("transactions", int64_t(stats.nTransactions));
         ret.pushKV("txouts", int64_t(stats.nTransactionOutputs));
         ret.pushKV("bogosize", int64_t(stats.nBogoSize));
         ret.pushKV("hash_serialized", stats.hashSerialized.GetHex());
         ret.pushKV("disk_size", stats.nDiskSize);
         ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount));
     } else {
         throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set");
     }
     return ret;
 }
 
 UniValue gettxout(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "gettxout",
         "Returns details about an unspent transaction output.\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id"},
             {"n", RPCArg::Type::NUM, RPCArg::Optional::NO, "vout number"},
             {"include_mempool", RPCArg::Type::BOOL, /* default */ "true",
              "Whether to include the mempool. Note that an unspent output that "
              "is spent in the mempool won't appear."},
         },
         RPCResult{
             "{\n"
             "  \"bestblock\" : \"hash\",    (string) the block hash\n"
             "  \"confirmations\" : n,       (numeric) The number of "
             "confirmations\n"
             "  \"value\" : x.xxx,           (numeric) The transaction value "
             "in " +
             CURRENCY_UNIT +
             "\n"
             "  \"scriptPubKey\" : {         (json object)\n"
             "     \"asm\" : \"code\",       (string) \n"
             "     \"hex\" : \"hex\",        (string) \n"
             "     \"reqSigs\" : n,          (numeric) Number of required "
             "signatures\n"
             "     \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n"
             "     \"addresses\" : [          (array of string) array of "
             "bitcoin addresses\n"
             "        \"address\"     (string) bitcoin address\n"
             "        ,...\n"
             "     ]\n"
             "  },\n"
             "  \"coinbase\" : true|false   (boolean) Coinbase or not\n"
             "}\n"},
         RPCExamples{"\nGet unspent transactions\n" +
                     HelpExampleCli("listunspent", "") + "\nView the details\n" +
                     HelpExampleCli("gettxout", "\"txid\" 1") +
                     "\nAs a JSON-RPC call\n" +
                     HelpExampleRpc("gettxout", "\"txid\", 1")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     UniValue ret(UniValue::VOBJ);
 
     TxId txid(ParseHashV(request.params[0], "txid"));
     int n = request.params[1].get_int();
     COutPoint out(txid, n);
     bool fMempool = true;
     if (!request.params[2].isNull()) {
         fMempool = request.params[2].get_bool();
     }
 
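     // With include_mempool, look the outpoint up through a mempool-backed
     // view: unconfirmed outputs become visible, while outputs already spent
     // by mempool transactions are reported as unavailable.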
     Coin coin;
     if (fMempool) {
         LOCK(g_mempool.cs);
         CCoinsViewMemPool view(pcoinsTip.get(), g_mempool);
         if (!view.GetCoin(out, coin) || g_mempool.isSpent(out)) {
             return NullUniValue;
         }
     } else {
         if (!pcoinsTip->GetCoin(out, coin)) {
             return NullUniValue;
         }
     }
 
     const CBlockIndex *pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
     ret.pushKV("bestblock", pindex->GetBlockHash().GetHex());
     if (coin.GetHeight() == MEMPOOL_HEIGHT) {
         ret.pushKV("confirmations", 0);
     } else {
         ret.pushKV("confirmations",
                    int64_t(pindex->nHeight - coin.GetHeight() + 1));
     }
     ret.pushKV("value", ValueFromAmount(coin.GetTxOut().nValue));
     UniValue o(UniValue::VOBJ);
     ScriptPubKeyToUniv(coin.GetTxOut().scriptPubKey, o, true);
     ret.pushKV("scriptPubKey", o);
     ret.pushKV("coinbase", coin.IsCoinBase());
 
     return ret;
 }
 
 static UniValue verifychain(const Config &config,
                             const JSONRPCRequest &request) {
     int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL);
     int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS);
     RPCHelpMan{
         "verifychain",
         "Verifies blockchain database.\n",
         {
             {"checklevel", RPCArg::Type::NUM,
              /* default */ strprintf("%d, range=0-4", nCheckLevel),
              "How thorough the block verification is."},
             {"nblocks", RPCArg::Type::NUM,
              /* default */ strprintf("%d, 0=all", nCheckDepth),
              "The number of blocks to check."},
         },
         RPCResult{"true|false       (boolean) Verified or not\n"},
         RPCExamples{HelpExampleCli("verifychain", "") +
                     HelpExampleRpc("verifychain", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     if (!request.params[0].isNull()) {
         nCheckLevel = request.params[0].get_int();
     }
     if (!request.params[1].isNull()) {
         nCheckDepth = request.params[1].get_int();
     }
 
     return CVerifyDB().VerifyDB(config, pcoinsTip.get(), nCheckLevel,
                                 nCheckDepth);
 }
 
 static void BIP9SoftForkDescPushBack(UniValue &softforks,
                                      const Consensus::Params &consensusParams,
                                      Consensus::DeploymentPos id)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     // For BIP9 deployments.
     // Deployments (e.g. testdummy) with timeout value before Jan 1, 2009 are
     // hidden. A timeout value of 0 guarantees a softfork will never be
     // activated. This is used when merging logic to implement a proposed
     // softfork without a specified deployment schedule.
     if (consensusParams.vDeployments[id].nTimeout <= 1230768000) {
         return;
     }
 
     UniValue bip9(UniValue::VOBJ);
     const ThresholdState thresholdState =
         VersionBitsTipState(consensusParams, id);
     switch (thresholdState) {
         case ThresholdState::DEFINED:
             bip9.pushKV("status", "defined");
             break;
         case ThresholdState::STARTED:
             bip9.pushKV("status", "started");
             break;
         case ThresholdState::LOCKED_IN:
             bip9.pushKV("status", "locked_in");
             break;
         case ThresholdState::ACTIVE:
             bip9.pushKV("status", "active");
             break;
         case ThresholdState::FAILED:
             bip9.pushKV("status", "failed");
             break;
     }
     if (ThresholdState::STARTED == thresholdState) {
         bip9.pushKV("bit", consensusParams.vDeployments[id].bit);
     }
     bip9.pushKV("start_time", consensusParams.vDeployments[id].nStartTime);
     bip9.pushKV("timeout", consensusParams.vDeployments[id].nTimeout);
     int64_t since_height = VersionBitsTipStateSinceHeight(consensusParams, id);
     bip9.pushKV("since", since_height);
     if (ThresholdState::STARTED == thresholdState) {
         UniValue statsUV(UniValue::VOBJ);
         BIP9Stats statsStruct = VersionBitsTipStatistics(consensusParams, id);
         statsUV.pushKV("period", statsStruct.period);
         statsUV.pushKV("threshold", statsStruct.threshold);
         statsUV.pushKV("elapsed", statsStruct.elapsed);
         statsUV.pushKV("count", statsStruct.count);
         statsUV.pushKV("possible", statsStruct.possible);
         bip9.pushKV("statistics", statsUV);
     }
 
     UniValue rv(UniValue::VOBJ);
     rv.pushKV("type", "bip9");
     rv.pushKV("bip9", bip9);
     if (ThresholdState::ACTIVE == thresholdState) {
         rv.pushKV("height", since_height);
     }
     rv.pushKV("active", ThresholdState::ACTIVE == thresholdState);
 
     softforks.pushKV(VersionBitsDeploymentInfo[id].name, rv);
 }
 
 UniValue getblockchaininfo(const Config &config,
                            const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockchaininfo",
         "Returns an object containing various state info regarding blockchain "
         "processing.\n",
         {},
         RPCResult{
             "{\n"
             "  \"chain\": \"xxxx\",            (string) current network name "
             "as defined in BIP70 (main, test, regtest)\n"
             "  \"blocks\": xxxxxx,             the height of the most-work "
             "fully-validated chain. The genesis block has height 0\n"
             "  \"headers\": xxxxxx,            (numeric) the current number of "
             "headers we have validated\n"
             "  \"bestblockhash\": \"...\",     (string) the hash of the "
             "currently best block\n"
             "  \"difficulty\": xxxxxx,         (numeric) the current "
             "difficulty\n"
             "  \"mediantime\": xxxxxx,         (numeric) median time for the "
             "current best block\n"
             "  \"verificationprogress\": xxxx, (numeric) estimate of "
             "verification progress [0..1]\n"
             "  \"initialblockdownload\": xxxx, (bool) (debug information) "
             "estimate of whether this node is in Initial Block Download mode.\n"
             "  \"chainwork\": \"xxxx\"         (string) total amount of work "
             "in active chain, in hexadecimal\n"
             "  \"size_on_disk\": xxxxxx,       (numeric) the estimated size of "
             "the block and undo files on disk\n"
             "  \"pruned\": xx,                 (boolean) if the blocks are "
             "subject to pruning\n"
             "  \"pruneheight\": xxxxxx,        (numeric) lowest-height "
             "complete block stored (only present if pruning is enabled)\n"
             "  \"automatic_pruning\": xx,      (boolean) whether automatic "
             "pruning is enabled (only present if pruning is enabled)\n"
             "  \"prune_target_size\": xxxxxx,  (numeric) the target size used "
             "by pruning (only present if automatic pruning is enabled)\n"
             "  \"softforks\": {                (object) status of softforks in "
             "progress\n"
             "    \"xxxx\" : {                  (string) name of the softfork\n"
             "      \"type\" : \"bip9\",        (string) currently only set to "
             "\"bip9\"\n"
             "      \"bip9\" : {                (object) status of bip9 "
             "softforks (only for \"bip9\" type)\n"
             "        \"status\": \"xxxx\",     (string) one of \"defined\", "
             "\"started\", \"locked_in\", \"active\", \"failed\"\n"
             "        \"bit\": xx,              (numeric) the bit (0-28) in the "
             "block version field used to signal this softfork (only for "
             "\"started\" status)\n"
             "        \"startTime\": xx,        (numeric) the minimum median "
             "time past of a block at which the bit gains its meaning\n"
             "        \"timeout\": xx,          (numeric) the median time past "
             "of a block at which the deployment is considered failed if not "
             "yet locked in\n"
             "        \"since\": xx,            (numeric) height of the first "
             "block to which the status applies\n"
             "        \"statistics\": {         (object) numeric statistics "
             "about BIP9 signalling for a softfork (only for \"started\" "
             "status)\n"
             "          \"period\": xx,         (numeric) the length in blocks "
             "of the BIP9 signalling period \n"
             "          \"threshold\": xx,      (numeric) the number of blocks "
             "with the version bit set required to activate the feature \n"
             "          \"elapsed\": xx,        (numeric) the number of blocks "
             "elapsed since the beginning of the current period \n"
             "          \"count\": xx,          (numeric) the number of blocks "
             "with the version bit set in the current period \n"
             "          \"possible\": xx        (boolean) returns false if "
             "there are not enough blocks left in this period to pass "
             "activation threshold\n"
             "        },\n"
             "        \"active\": xx,           (boolean) true if the rules are "
             "enforced for the mempool and the next block\n"
             "      }\n"
             "    }\n"
             "  }\n"
             "  \"warnings\" : \"...\",           (string) any network and "
             "blockchain warnings.\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getblockchaininfo", "") +
                     HelpExampleRpc("getblockchaininfo", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     const CChainParams &chainparams = config.GetChainParams();
 
     const CBlockIndex *tip = ::ChainActive().Tip();
     UniValue obj(UniValue::VOBJ);
     obj.pushKV("chain", chainparams.NetworkIDString());
     obj.pushKV("blocks", int(::ChainActive().Height()));
     obj.pushKV("headers", pindexBestHeader ? pindexBestHeader->nHeight : -1);
     obj.pushKV("bestblockhash", tip->GetBlockHash().GetHex());
     obj.pushKV("difficulty", double(GetDifficulty(tip)));
     obj.pushKV("mediantime", int64_t(tip->GetMedianTimePast()));
     obj.pushKV("verificationprogress",
                GuessVerificationProgress(Params().TxData(), tip));
     obj.pushKV("initialblockdownload",
                ::ChainstateActive().IsInitialBlockDownload());
     obj.pushKV("chainwork", tip->nChainWork.GetHex());
     obj.pushKV("size_on_disk", CalculateCurrentUsage());
     obj.pushKV("pruned", fPruneMode);
 
     if (fPruneMode) {
         const CBlockIndex *block = tip;
         CHECK_NONFATAL(block);
         while (block->pprev && (block->pprev->nStatus.hasData())) {
             block = block->pprev;
         }
 
         obj.pushKV("pruneheight", block->nHeight);
 
         // -prune=1 selects manual pruning (pruneblockchain RPC only), so
         // any other non-zero value means automatic pruning. With -prune=0,
         // fPruneMode is false and this block is never reached.
         bool automatic_pruning = (gArgs.GetArg("-prune", 0) != 1);
         obj.pushKV("automatic_pruning", automatic_pruning);
         if (automatic_pruning) {
             obj.pushKV("prune_target_size", nPruneTarget);
         }
     }
 
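     // Describe every version bits deployment; deployments whose timeout is at
     // or before Jan 1, 2009 are hidden by BIP9SoftForkDescPushBack.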
     UniValue softforks(UniValue::VOBJ);
     for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
         BIP9SoftForkDescPushBack(softforks, chainparams.GetConsensus(),
                                  Consensus::DeploymentPos(i));
     }
     obj.pushKV("softforks", softforks);
 
     obj.pushKV("warnings", GetWarnings("statusbar"));
     return obj;
 }
 
 /** Comparison function for sorting the getchaintips heads.  */
 struct CompareBlocksByHeight {
     bool operator()(const CBlockIndex *a, const CBlockIndex *b) const {
         // Make sure that unequal blocks with the same height do not compare
         // equal. Use the pointers themselves to make a distinction.
         if (a->nHeight != b->nHeight) {
             return (a->nHeight > b->nHeight);
         }
 
         return a < b;
     }
 };
 
 static UniValue getchaintips(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "getchaintips",
         "Return information about all known tips in the block tree, including "
         "the main chain as well as orphaned branches.\n",
         {},
         RPCResult{
             "[\n"
             "  {\n"
             "    \"height\": xxxx,         (numeric) height of the chain tip\n"
             "    \"hash\": \"xxxx\",       (string) block hash of the tip\n"
             "    \"branchlen\": 0          (numeric) zero for main chain\n"
             "    \"status\": \"active\"    (string) \"active\" for the main "
             "chain\n"
             "  },\n"
             "  {\n"
             "    \"height\": xxxx,\n"
             "    \"hash\": \"xxxx\",\n"
             "    \"branchlen\": 1          (numeric) length of branch "
             "connecting the tip to the main chain\n"
             "    \"status\": \"xxxx\"      (string) status of the chain "
             "(active, valid-fork, valid-headers, headers-only, invalid)\n"
             "  }\n"
             "]\n"
             "Possible values for status:\n"
             "1.  \"invalid\"               This branch contains at least one "
             "invalid block\n"
             "2.  \"parked\"                This branch contains at least one "
             "parked block\n"
             "3.  \"headers-only\"          Not all blocks for this branch are "
             "available, but the headers are valid\n"
             "4.  \"valid-headers\"         All blocks are available for this "
             "branch, but they were never fully validated\n"
             "5.  \"valid-fork\"            This branch is not part of the "
             "active chain, but is fully validated\n"
             "6.  \"active\"                This is the tip of the active main "
             "chain, which is certainly valid\n"},
         RPCExamples{HelpExampleCli("getchaintips", "") +
                     HelpExampleRpc("getchaintips", "")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     /**
      * Idea: the set of chain tips is ::ChainActive().Tip(), plus orphan
      * blocks which do not have another orphan building off of them.
      * Algorithm:
      *  - Make one pass through g_blockman.m_block_index, picking out the
      *    orphan blocks, and also storing a set of the orphan block's pprev
      *    pointers.
      *  - Iterate through the orphan blocks. If the block isn't pointed to by
      *    another orphan, it is a chain tip.
      *  - Add ::ChainActive().Tip().
      */
     std::set<const CBlockIndex *, CompareBlocksByHeight> setTips;
     std::set<const CBlockIndex *> setOrphans;
     std::set<const CBlockIndex *> setPrevs;
 
     for (const std::pair<const BlockHash, CBlockIndex *> &item :
          ::BlockIndex()) {
         if (!::ChainActive().Contains(item.second)) {
             setOrphans.insert(item.second);
             setPrevs.insert(item.second->pprev);
         }
     }
 
     for (std::set<const CBlockIndex *>::iterator it = setOrphans.begin();
          it != setOrphans.end(); ++it) {
         if (setPrevs.erase(*it) == 0) {
             setTips.insert(*it);
         }
     }
 
     // Always report the currently active tip.
     setTips.insert(::ChainActive().Tip());
 
     /* Construct the output array.  */
     UniValue res(UniValue::VARR);
     for (const CBlockIndex *block : setTips) {
         UniValue obj(UniValue::VOBJ);
         obj.pushKV("height", block->nHeight);
         obj.pushKV("hash", block->phashBlock->GetHex());
 
         const int branchLen =
             block->nHeight - ::ChainActive().FindFork(block)->nHeight;
         obj.pushKV("branchlen", branchLen);
 
         std::string status;
         if (::ChainActive().Contains(block)) {
             // This block is part of the currently active chain.
             status = "active";
         } else if (block->nStatus.isInvalid()) {
             // This block or one of its ancestors is invalid.
             status = "invalid";
         } else if (block->nStatus.isOnParkedChain()) {
             // This block or one of its ancestors is parked.
             status = "parked";
         } else if (!block->HaveTxsDownloaded()) {
             // This block cannot be connected because full block data for it or
             // one of its parents is missing.
             status = "headers-only";
         } else if (block->IsValid(BlockValidity::SCRIPTS)) {
             // This block is fully validated, but no longer part of the active
             // chain. It was probably the active block once, but was
             // reorganized.
             status = "valid-fork";
         } else if (block->IsValid(BlockValidity::TREE)) {
             // The headers for this block are valid, but it has not been
             // validated. It was probably never part of the most-work chain.
             status = "valid-headers";
         } else {
             // No clue.
             status = "unknown";
         }
         obj.pushKV("status", status);
 
         res.push_back(obj);
     }
 
     return res;
 }
 
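 // Assemble the getmempoolinfo result. maxmempool mirrors the -maxmempool
 // option (megabytes, converted to bytes here).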
 UniValue MempoolInfoToJSON(const CTxMemPool &pool) {
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("loaded", pool.IsLoaded());
     ret.pushKV("size", (int64_t)pool.size());
     ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize());
     ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage());
     size_t maxmempool =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
     ret.pushKV("maxmempool", (int64_t)maxmempool);
     ret.pushKV(
         "mempoolminfee",
         ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee)
                             .GetFeePerK()));
     ret.pushKV("minrelaytxfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()));
 
     return ret;
 }
 
 static UniValue getmempoolinfo(const Config &config,
                                const JSONRPCRequest &request) {
     RPCHelpMan{
         "getmempoolinfo",
         "Returns details on the active state of the TX memory pool.\n",
         {},
         RPCResult{
             "{\n"
             "  \"loaded\": true|false         (boolean) True if the mempool is "
             "fully loaded\n"
             "  \"size\": xxxxx,               (numeric) Current tx count\n"
             "  \"bytes\": xxxxx,              (numeric) Transaction size.\n"
             "  \"usage\": xxxxx,              (numeric) Total memory usage for "
             "the mempool\n"
             "  \"maxmempool\": xxxxx,         (numeric) Maximum memory usage "
             "for the mempool\n"
             "  \"mempoolminfee\": xxxxx       (numeric) Minimum fee rate in " +
             CURRENCY_UNIT +
             "/kB for tx to be accepted. Is the maximum of minrelaytxfee and "
             "minimum mempool fee\n"
             "  \"minrelaytxfee\": xxxxx       (numeric) Current minimum relay "
             "fee for transactions\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getmempoolinfo", "") +
                     HelpExampleRpc("getmempoolinfo", "")},
     }
         .Check(request);
 
     return MempoolInfoToJSON(::g_mempool);
 }
 
 static UniValue preciousblock(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "preciousblock",
         "Treats a block as if it were received before others with the same "
         "work.\n"
         "\nA later preciousblock call can override the effect of an earlier "
         "one.\n"
         "\nThe effects of preciousblock are not retained across restarts.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to mark as precious"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("preciousblock", "\"blockhash\"") +
                     HelpExampleRpc("preciousblock", "\"blockhash\"")},
     }
         .Check(request);
 
     BlockHash hash(ParseHashV(request.params[0], "blockhash"));
     CBlockIndex *pblockindex;
 
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     }
 
     BlockValidationState state;
     PreciousBlock(config, state, pblockindex);
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason());
     }
 
     return NullUniValue;
 }
 
 UniValue finalizeblock(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "finalizeblock",
         "Treats a block as final. It cannot be reorged. Any chain\n"
         "that does not contain this block is invalid. Used on a less\n"
         "work chain, it can effectively PUT YOU OUT OF CONSENSUS.\n"
         "USE WITH CAUTION!\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to mark as invalid"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("finalizeblock", "\"blockhash\"") +
                     HelpExampleRpc("finalizeblock", "\"blockhash\"")},
     }
         .Check(request);
 
     std::string strHash = request.params[0].get_str();
     BlockHash hash(uint256S(strHash));
     BlockValidationState state;
 
     CBlockIndex *pblockindex = nullptr;
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     } // end of locked cs_main scope
 
     ::ChainstateActive().FinalizeBlock(config, state, pblockindex);
 
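     // If finalization succeeded, re-run best chain activation so the active
     // tip respects the newly finalized block.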
     if (state.IsValid()) {
         ActivateBestChain(config, state);
     }
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
     }
 
     return NullUniValue;
 }
 
 static UniValue invalidateblock(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "invalidateblock",
         "Permanently marks a block as invalid, as if it violated a consensus "
         "rule.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to mark as invalid"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("invalidateblock", "\"blockhash\"") +
                     HelpExampleRpc("invalidateblock", "\"blockhash\"")},
     }
         .Check(request);
 
     const BlockHash hash(ParseHashV(request.params[0], "blockhash"));
     BlockValidationState state;
 
     CBlockIndex *pblockindex;
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     }
     ::ChainstateActive().InvalidateBlock(config, state, pblockindex);
 
     if (state.IsValid()) {
         ActivateBestChain(config, state);
     }
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
     }
 
     return NullUniValue;
 }
 
 UniValue parkblock(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "parkblock",
         "Marks a block as parked.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to park"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("parkblock", "\"blockhash\"") +
                     HelpExampleRpc("parkblock", "\"blockhash\"")},
     }
         .Check(request);
 
     const std::string strHash = request.params[0].get_str();
     const BlockHash hash(uint256S(strHash));
     BlockValidationState state;
 
     CBlockIndex *pblockindex = nullptr;
     {
         LOCK(cs_main);
         pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     }
     ::ChainstateActive().ParkBlock(config, state, pblockindex);
 
     if (state.IsValid()) {
         ActivateBestChain(config, state);
     }
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason());
     }
 
     return NullUniValue;
 }
 
 static UniValue reconsiderblock(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "reconsiderblock",
         "Removes invalidity status of a block and its descendants, "
         "reconsider them for activation.\n"
         "This can be used to undo the effects of invalidateblock.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to reconsider"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("reconsiderblock", "\"blockhash\"") +
                     HelpExampleRpc("reconsiderblock", "\"blockhash\"")},
     }
         .Check(request);
 
     const BlockHash hash(ParseHashV(request.params[0], "blockhash"));
 
     {
         LOCK(cs_main);
         CBlockIndex *pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
 
         ResetBlockFailureFlags(pblockindex);
     }
 
     BlockValidationState state;
     ActivateBestChain(config, state);
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, FormatStateMessage(state));
     }
 
     return NullUniValue;
 }
 
 UniValue unparkblock(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "unparkblock",
         "Removes parked status of a block and its descendants, reconsider "
         "them for activation.\n"
         "This can be used to undo the effects of parkblock.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hash of the block to unpark"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("unparkblock", "\"blockhash\"") +
                     HelpExampleRpc("unparkblock", "\"blockhash\"")},
     }
         .Check(request);
 
     const std::string strHash = request.params[0].get_str();
     const BlockHash hash(uint256S(strHash));
 
     {
         LOCK(cs_main);
 
         CBlockIndex *pblockindex = LookupBlockIndex(hash);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
 
         UnparkBlockAndChildren(pblockindex);
     }
 
     BlockValidationState state;
     ActivateBestChain(config, state);
 
     if (!state.IsValid()) {
         throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason());
     }
 
     return NullUniValue;
 }
 
 static UniValue getchaintxstats(const Config &config,
                                 const JSONRPCRequest &request) {
     RPCHelpMan{
         "getchaintxstats",
         "Compute statistics about the total number and rate of transactions "
         "in the chain.\n",
         {
             {"nblocks", RPCArg::Type::NUM, /* default */ "one month",
              "Size of the window in number of blocks"},
             {"blockhash", RPCArg::Type::STR_HEX, /* default */ "chain tip",
              "The hash of the block that ends the window."},
         },
         RPCResult{
             "{\n"
             "  \"time\": xxxxx,                         (numeric) The "
             "timestamp for the final block in the window, expressed in " +
             UNIX_EPOCH_TIME +
             ".\n"
             "  \"txcount\": xxxxx,                      (numeric) The total "
             "number of transactions in the chain up to that point.\n"
             "  \"window_final_block_hash\": \"...\",    (string) The hash of "
             "the final block in the window.\n"
             "  \"window_final_block_height\": xxxxx,    (numeric) The height "
             "of the final block in the window.\n"
             "  \"window_block_count\": xxxxx,           (numeric) Size of the "
             "window in number of blocks.\n"
             "  \"window_tx_count\": xxxxx,              (numeric) The number "
             "of transactions in the window. Only returned if "
             "\"window_block_count\" is > 0.\n"
             "  \"window_interval\": xxxxx,              (numeric) The elapsed "
             "time in the window in seconds. Only returned if "
             "\"window_block_count\" is > 0.\n"
             "  \"txrate\": x.xx,                        (numeric) The average "
             "rate of transactions per second in the window. Only returned if "
             "\"window_interval\" is > 0.\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getchaintxstats", "") +
                     HelpExampleRpc("getchaintxstats", "2016")},
     }
         .Check(request);
 
     const CBlockIndex *pindex;
 
     // By default: 1 month
     int blockcount = 30 * 24 * 60 * 60 /
                      config.GetChainParams().GetConsensus().nPowTargetSpacing;
 
     if (request.params[1].isNull()) {
         LOCK(cs_main);
         pindex = ::ChainActive().Tip();
     } else {
         BlockHash hash(ParseHashV(request.params[1], "blockhash"));
         LOCK(cs_main);
         pindex = LookupBlockIndex(hash);
         if (!pindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
         if (!::ChainActive().Contains(pindex)) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Block is not in main chain");
         }
     }
 
     CHECK_NONFATAL(pindex != nullptr);
 
     if (request.params[0].isNull()) {
         blockcount = std::max(0, std::min(blockcount, pindex->nHeight - 1));
     } else {
         blockcount = request.params[0].get_int();
 
         if (blockcount < 0 ||
             (blockcount > 0 && blockcount >= pindex->nHeight)) {
             throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid block count: "
                                                       "should be between 0 and "
                                                       "the block's height - 1");
         }
     }
 
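     // The window spans [pindexPast, pindex]; diff the chain-wide tx counts
     // and median times at both ends to get its tx count and duration.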
     const CBlockIndex *pindexPast =
         pindex->GetAncestor(pindex->nHeight - blockcount);
     int nTimeDiff =
         pindex->GetMedianTimePast() - pindexPast->GetMedianTimePast();
     int nTxDiff = pindex->GetChainTxCount() - pindexPast->GetChainTxCount();
 
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("time", pindex->GetBlockTime());
     ret.pushKV("txcount", pindex->GetChainTxCount());
     ret.pushKV("window_final_block_hash", pindex->GetBlockHash().GetHex());
     ret.pushKV("window_final_block_height", pindex->nHeight);
     ret.pushKV("window_block_count", blockcount);
     if (blockcount > 0) {
         ret.pushKV("window_tx_count", nTxDiff);
         ret.pushKV("window_interval", nTimeDiff);
         if (nTimeDiff > 0) {
             ret.pushKV("txrate", double(nTxDiff) / nTimeDiff);
         }
     }
 
     return ret;
 }
 
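 // Return the median of the given values, sorting them in place; with an even
 // count the two middle values are averaged.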
 template <typename T>
 static T CalculateTruncatedMedian(std::vector<T> &scores) {
     size_t size = scores.size();
     if (size == 0) {
         return T();
     }
 
     std::sort(scores.begin(), scores.end());
     if (size % 2 == 0) {
         return (scores[size / 2 - 1] + scores[size / 2]) / 2;
     } else {
         return scores[size / 2];
     }
 }
 
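 // Variadic helper: SetHasKeys(set, k1, k2, ...) is true if the set contains
 // at least one of the keys. The zero-key overload terminates the recursion.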
 template <typename T> static inline bool SetHasKeys(const std::set<T> &set) {
     return false;
 }
 template <typename T, typename Tk, typename... Args>
 static inline bool SetHasKeys(const std::set<T> &set, const Tk &key,
                               const Args &... args) {
     return (set.count(key) != 0) || SetHasKeys(set, args...);
 }
 
 // outpoint (needed for the utxo index) + nHeight + fCoinBase
 static constexpr size_t PER_UTXO_OVERHEAD =
     sizeof(COutPoint) + sizeof(uint32_t) + sizeof(bool);
 
 static UniValue getblockstats(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockstats",
         "Compute per block statistics for a given window. All amounts are "
         "in " +
             CURRENCY_UNIT +
             ".\n"
             "It won't work for some heights with pruning.\n",
         {
             {"hash_or_height",
              RPCArg::Type::NUM,
              RPCArg::Optional::NO,
              "The block hash or height of the target block",
              "",
              {"", "string or numeric"}},
             {"stats",
              RPCArg::Type::ARR,
              /* default */ "all values",
              "Values to plot (see result below)",
              {
                  {"height", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                   "Selected statistic"},
                  {"time", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                   "Selected statistic"},
              },
              "stats"},
         },
         RPCResult{
             "{                           (json object)\n"
             "  \"avgfee\": x.xxx,          (numeric) Average fee in the block\n"
             "  \"avgfeerate\": x.xxx,      (numeric) Average feerate (in " +
             CURRENCY_UNIT +
             " per byte)\n"
             "  \"avgtxsize\": xxxxx,       (numeric) Average transaction size\n"
             "  \"blockhash\": xxxxx,       (string) The block hash (to check "
             "for potential reorgs)\n"
             "  \"height\": xxxxx,          (numeric) The height of the block\n"
             "  \"ins\": xxxxx,             (numeric) The number of inputs "
             "(excluding coinbase)\n"
             "  \"maxfee\": xxxxx,          (numeric) Maximum fee in the block\n"
             "  \"maxfeerate\": xxxxx,      (numeric) Maximum feerate (in " +
             CURRENCY_UNIT +
             " per byte)\n"
             "  \"maxtxsize\": xxxxx,       (numeric) Maximum transaction size\n"
             "  \"medianfee\": x.xxx,       (numeric) Truncated median fee in "
             "the block\n"
             "  \"medianfeerate\": x.xxx,   (numeric) Truncated median feerate "
             "(in " +
             CURRENCY_UNIT +
             " per byte)\n"
             "  \"mediantime\": xxxxx,      (numeric) The block median time "
             "past\n"
             "  \"mediantxsize\": xxxxx,    (numeric) Truncated median "
             "transaction size\n"
             "  \"minfee\": x.xxx,          (numeric) Minimum fee in the block\n"
             "  \"minfeerate\": xx.xx,      (numeric) Minimum feerate (in " +
             CURRENCY_UNIT +
             " per byte)\n"
             "  \"mintxsize\": xxxxx,       (numeric) Minimum transaction size\n"
             "  \"outs\": xxxxx,            (numeric) The number of outputs\n"
             "  \"subsidy\": x.xxx,         (numeric) The block subsidy\n"
             "  \"time\": xxxxx,            (numeric) The block time\n"
             "  \"total_out\": x.xxx,       (numeric) Total amount in all "
             "outputs (excluding coinbase and thus reward [ie subsidy + "
             "totalfee])\n"
             "  \"total_size\": xxxxx,      (numeric) Total size of all "
             "non-coinbase transactions\n"
             "  \"totalfee\": x.xxx,        (numeric) The fee total\n"
             "  \"txs\": xxxxx,             (numeric) The number of "
             "transactions (excluding coinbase)\n"
             "  \"utxo_increase\": xxxxx,   (numeric) The increase/decrease in "
             "the number of unspent outputs\n"
             "  \"utxo_size_inc\": xxxxx,   (numeric) The increase/decrease in "
             "size for the utxo index (not discounting op_return and similar)\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getblockstats",
                                    "1000 '[\"minfeerate\",\"avgfeerate\"]'") +
                     HelpExampleRpc("getblockstats",
                                    "1000 '[\"minfeerate\",\"avgfeerate\"]'")},
     }
         .Check(request);
 
     LOCK(cs_main);
 
     CBlockIndex *pindex;
     if (request.params[0].isNum()) {
         const int height = request.params[0].get_int();
         const int current_tip = ::ChainActive().Height();
         if (height < 0) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Target block height %d is negative", height));
         }
         if (height > current_tip) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Target block height %d after current tip %d", height,
                           current_tip));
         }
 
         pindex = ::ChainActive()[height];
     } else {
         const BlockHash hash(ParseHashV(request.params[0], "hash_or_height"));
         pindex = LookupBlockIndex(hash);
         if (!pindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
         if (!::ChainActive().Contains(pindex)) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                strprintf("Block is not in chain %s",
                                          Params().NetworkIDString()));
         }
     }
 
     CHECK_NONFATAL(pindex != nullptr);
 
     std::set<std::string> stats;
     if (!request.params[1].isNull()) {
         const UniValue stats_univalue = request.params[1].get_array();
         for (unsigned int i = 0; i < stats_univalue.size(); i++) {
             const std::string stat = stats_univalue[i].get_str();
             stats.insert(stat);
         }
     }
 
     const CBlock block = GetBlockChecked(config, pindex);
     const CBlockUndo blockUndo = GetUndoChecked(pindex);
 
     // Calculate everything if nothing selected (default)
     const bool do_all = stats.size() == 0;
     const bool do_mediantxsize = do_all || stats.count("mediantxsize") != 0;
     const bool do_medianfee = do_all || stats.count("medianfee") != 0;
     const bool do_medianfeerate = do_all || stats.count("medianfeerate") != 0;
     const bool loop_inputs =
         do_all || do_medianfee || do_medianfeerate ||
         SetHasKeys(stats, "utxo_size_inc", "totalfee", "avgfee", "avgfeerate",
                    "minfee", "maxfee", "minfeerate", "maxfeerate");
     const bool loop_outputs = do_all || loop_inputs || stats.count("total_out");
     const bool do_calculate_size =
         do_mediantxsize || loop_inputs ||
         SetHasKeys(stats, "total_size", "avgtxsize", "mintxsize", "maxtxsize");
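     // loop_inputs implies loop_outputs, because each fee is computed as the
     // input sum minus the output sum, and it implies do_calculate_size,
     // because feerates divide the fee by the transaction size. ret_all is
     // always fully populated; only the requested keys are copied out below.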
 
     const int64_t blockMaxSize = config.GetMaxBlockSize();
     Amount maxfee = Amount::zero();
     Amount maxfeerate = Amount::zero();
     Amount minfee = MAX_MONEY;
     Amount minfeerate = MAX_MONEY;
     Amount total_out = Amount::zero();
     Amount totalfee = Amount::zero();
     int64_t inputs = 0;
     int64_t maxtxsize = 0;
     int64_t mintxsize = blockMaxSize;
     int64_t outputs = 0;
     int64_t total_size = 0;
     int64_t utxo_size_inc = 0;
     std::vector<Amount> fee_array;
     std::vector<Amount> feerate_array;
     std::vector<int64_t> txsize_array;
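     // minfee/minfeerate start at the MAX_MONEY sentinel and mintxsize at the
     // block size limit; if no non-coinbase transaction updates them they are
     // mapped back to 0 when ret_all is built below.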
 
     for (size_t i = 0; i < block.vtx.size(); ++i) {
         const auto &tx = block.vtx.at(i);
         outputs += tx->vout.size();
         Amount tx_total_out = Amount::zero();
         if (loop_outputs) {
             for (const CTxOut &out : tx->vout) {
                 tx_total_out += out.nValue;
                 utxo_size_inc +=
                     GetSerializeSize(out, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD;
             }
         }
 
         if (tx->IsCoinBase()) {
             continue;
         }
 
         // Don't count coinbase's fake input
         inputs += tx->vin.size();
         // Don't count coinbase reward
         total_out += tx_total_out;
 
         int64_t tx_size = 0;
         if (do_calculate_size) {
             tx_size = tx->GetTotalSize();
             if (do_mediantxsize) {
                 txsize_array.push_back(tx_size);
             }
             maxtxsize = std::max(maxtxsize, tx_size);
             mintxsize = std::min(mintxsize, tx_size);
             total_size += tx_size;
         }
 
         if (loop_inputs) {
             Amount tx_total_in = Amount::zero();
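             // vtxundo has no entry for the coinbase, so transaction i in the
             // block maps to undo entry i - 1.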
             const auto &txundo = blockUndo.vtxundo.at(i - 1);
             for (const Coin &coin : txundo.vprevout) {
                 const CTxOut &prevoutput = coin.GetTxOut();
 
                 tx_total_in += prevoutput.nValue;
                 utxo_size_inc -=
                     GetSerializeSize(prevoutput, PROTOCOL_VERSION) +
                     PER_UTXO_OVERHEAD;
             }
 
             Amount txfee = tx_total_in - tx_total_out;
             CHECK_NONFATAL(MoneyRange(txfee));
             if (do_medianfee) {
                 fee_array.push_back(txfee);
             }
             maxfee = std::max(maxfee, txfee);
             minfee = std::min(minfee, txfee);
             totalfee += txfee;
 
             Amount feerate = txfee / tx_size;
             if (do_medianfeerate) {
                 feerate_array.push_back(feerate);
             }
             maxfeerate = std::max(maxfeerate, feerate);
             minfeerate = std::min(minfeerate, feerate);
         }
     }
 
     UniValue ret_all(UniValue::VOBJ);
     ret_all.pushKV("avgfee",
                    ValueFromAmount((block.vtx.size() > 1)
                                        ? totalfee / int((block.vtx.size() - 1))
                                        : Amount::zero()));
     ret_all.pushKV("avgfeerate",
                    ValueFromAmount((total_size > 0) ? totalfee / total_size
                                                     : Amount::zero()));
     ret_all.pushKV("avgtxsize", (block.vtx.size() > 1)
                                     ? total_size / (block.vtx.size() - 1)
                                     : 0);
     ret_all.pushKV("blockhash", pindex->GetBlockHash().GetHex());
     ret_all.pushKV("height", (int64_t)pindex->nHeight);
     ret_all.pushKV("ins", inputs);
     ret_all.pushKV("maxfee", ValueFromAmount(maxfee));
     ret_all.pushKV("maxfeerate", ValueFromAmount(maxfeerate));
     ret_all.pushKV("maxtxsize", maxtxsize);
     ret_all.pushKV("medianfee",
                    ValueFromAmount(CalculateTruncatedMedian(fee_array)));
     ret_all.pushKV("medianfeerate",
                    ValueFromAmount(CalculateTruncatedMedian(feerate_array)));
     ret_all.pushKV("mediantime", pindex->GetMedianTimePast());
     ret_all.pushKV("mediantxsize", CalculateTruncatedMedian(txsize_array));
     ret_all.pushKV(
         "minfee",
         ValueFromAmount((minfee == MAX_MONEY) ? Amount::zero() : minfee));
     ret_all.pushKV("minfeerate",
                    ValueFromAmount((minfeerate == MAX_MONEY) ? Amount::zero()
                                                              : minfeerate));
     ret_all.pushKV("mintxsize", mintxsize == blockMaxSize ? 0 : mintxsize);
     ret_all.pushKV("outs", outputs);
     ret_all.pushKV("subsidy", ValueFromAmount(GetBlockSubsidy(
                                   pindex->nHeight, Params().GetConsensus())));
     ret_all.pushKV("time", pindex->GetBlockTime());
     ret_all.pushKV("total_out", ValueFromAmount(total_out));
     ret_all.pushKV("total_size", total_size);
     ret_all.pushKV("totalfee", ValueFromAmount(totalfee));
     ret_all.pushKV("txs", (int64_t)block.vtx.size());
     ret_all.pushKV("utxo_increase", outputs - inputs);
     ret_all.pushKV("utxo_size_inc", utxo_size_inc);
 
     if (do_all) {
         return ret_all;
     }
 
     UniValue ret(UniValue::VOBJ);
     for (const std::string &stat : stats) {
         const UniValue &value = ret_all[stat];
         if (value.isNull()) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Invalid selected statistic %s", stat));
         }
         ret.pushKV(stat, value);
     }
     return ret;
 }
 
 static UniValue savemempool(const Config &config,
                             const JSONRPCRequest &request) {
     RPCHelpMan{
         "savemempool",
         "Dumps the mempool to disk. It will fail until the previous dump is "
         "fully loaded.\n",
         {},
         RPCResults{},
         RPCExamples{HelpExampleCli("savemempool", "") +
                     HelpExampleRpc("savemempool", "")},
     }
         .Check(request);
 
     if (!::g_mempool.IsLoaded()) {
         throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet");
     }
 
     if (!DumpMempool(::g_mempool)) {
         throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk");
     }
 
     return NullUniValue;
 }
 
 //! Search for a given set of pubkey scripts
 static bool FindScriptPubKey(std::atomic<int> &scan_progress,
                              const std::atomic<bool> &should_abort,
                              int64_t &count, CCoinsViewCursor *cursor,
                              const std::set<CScript> &needles,
                              std::map<COutPoint, Coin> &out_results) {
     scan_progress = 0;
     count = 0;
     while (cursor->Valid()) {
         COutPoint key;
         Coin coin;
         if (!cursor->GetKey(key) || !cursor->GetValue(coin)) {
             return false;
         }
         if (++count % 8192 == 0) {
             boost::this_thread::interruption_point();
             if (should_abort) {
                 // allow aborting the scan via the abort reference
                 return false;
             }
         }
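         // The cursor walks the coins database in key order (effectively txid
         // order) and txids are roughly uniformly distributed, so the first
         // two bytes of the current txid, read as a big-endian value in
         // [0, 65535], give a cheap progress estimate.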
         if (count % 256 == 0) {
             // update the progress reference every 256 items
             const TxId &txid = key.GetTxId();
             uint32_t high = 0x100 * *txid.begin() + *(txid.begin() + 1);
             scan_progress = int(high * 100.0 / 65536.0 + 0.5);
         }
         if (needles.count(coin.GetTxOut().scriptPubKey)) {
             out_results.emplace(key, coin);
         }
         cursor->Next();
     }
     scan_progress = 100;
     return true;
 }
 
 /** RAII object to prevent concurrency issues when scanning the txout set */
 static std::atomic<int> g_scan_progress;
 static std::atomic<bool> g_scan_in_progress;
 static std::atomic<bool> g_should_abort_scan;
 class CoinsViewScanReserver {
 private:
     bool m_could_reserve;
 
 public:
     explicit CoinsViewScanReserver() : m_could_reserve(false) {}
 
     bool reserve() {
         CHECK_NONFATAL(!m_could_reserve);
         if (g_scan_in_progress.exchange(true)) {
             return false;
         }
         m_could_reserve = true;
         return true;
     }
 
     ~CoinsViewScanReserver() {
         if (m_could_reserve) {
             g_scan_in_progress = false;
         }
     }
 };
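 // Usage sketch (this mirrors the action handlers in scantxoutset below):
 // construct the reserver on the stack and call reserve(). A false return
 // means another scan currently holds g_scan_in_progress; otherwise the
 // destructor releases the flag when the reserving scope exits:
 //
 //     CoinsViewScanReserver reserver;
 //     if (!reserver.reserve()) {
 //         // a scan is already running
 //     }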
 
 static UniValue scantxoutset(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "scantxoutset",
         "EXPERIMENTAL warning: this call may be removed or changed in future "
         "releases.\n"
         "\nScans the unspent transaction output set for entries that match "
         "certain output descriptors.\n"
         "Examples of output descriptors are:\n"
         "    addr(<address>)                      Outputs whose scriptPubKey "
         "corresponds to the specified address (does not include P2PK)\n"
         "    raw(<hex script>)                    Outputs whose scriptPubKey "
         "equals the specified hex scripts\n"
         "    combo(<pubkey>)                      P2PK and P2PKH outputs for "
         "the given pubkey\n"
         "    pkh(<pubkey>)                        P2PKH outputs for the given "
         "pubkey\n"
         "    sh(multi(<n>,<pubkey>,<pubkey>,...)) P2SH-multisig outputs for "
         "the given threshold and pubkeys\n"
         "\nIn the above, <pubkey> either refers to a fixed public key in "
         "hexadecimal notation, or to an xpub/xprv optionally followed by one\n"
         "or more path elements separated by \"/\", and optionally ending in "
         "\"/*\" (unhardened), or \"/*'\" or \"/*h\" (hardened) to specify all\n"
         "unhardened or hardened child keys.\n"
         "In the latter case, a range needs to be specified by below if "
         "different from 1000.\n"
         "For more information on output descriptors, see the documentation in "
         "the doc/descriptors.md file.\n",
         {
             {"action", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The action to execute\n"
              "                                      \"start\" for starting a "
              "scan\n"
              "                                      \"abort\" for aborting the "
              "current scan (returns true when abort was successful)\n"
              "                                      \"status\" for "
              "progress report (in %) of the current scan"},
             {"scanobjects",
              RPCArg::Type::ARR,
              RPCArg::Optional::NO,
              "Array of scan objects\n"
              "                                  Every scan object is either a "
              "string descriptor or an object:",
              {
                  {"descriptor", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                   "An output descriptor"},
                  {
                      "",
                      RPCArg::Type::OBJ,
                      RPCArg::Optional::OMITTED,
                      "An object with output descriptor and metadata",
                      {
                          {"desc", RPCArg::Type::STR, RPCArg::Optional::NO,
                           "An output descriptor"},
                          {"range", RPCArg::Type::RANGE, /* default */ "1000",
                           "The range of HD chain indexes to explore (either "
                           "end or [begin,end])"},
                      },
                  },
              },
              "[scanobjects,...]"},
         },
         RPCResult{
             "{\n"
             "  \"success\": true|false,         (boolean) Whether the scan was "
             "completed\n"
             "  \"txouts\": n,                   (numeric) The number of "
             "unspent transaction outputs scanned\n"
             "  \"height\": n,                   (numeric) The current block "
             "height (index)\n"
             "  \"bestblock\": \"hex\",            (string) The hash of the "
             "block at the tip of the chain\n"
             "  \"unspents\": [\n"
             "   {\n"
             "    \"txid\": \"hash\",              (string) The transaction id\n"
             "    \"vout\": n,                   (numeric) The vout value\n"
             "    \"scriptPubKey\": \"script\",    (string) The script key\n"
             "    \"desc\": \"descriptor\",        (string) A specialized "
             "descriptor for the matched scriptPubKey\n"
             "    \"amount\": x.xxx,             (numeric) The total amount "
             "in " +
             CURRENCY_UNIT +
             " of the unspent output\n"
             "    \"height\": n,                 (numeric) Height of the "
             "unspent transaction output\n"
             "   }\n"
             "   ,...],\n"
             "  \"total_amount\": x.xxx,          (numeric) The total amount of "
             "all found unspent outputs in " +
             CURRENCY_UNIT +
             "\n"
             "]\n"},
         RPCExamples{""},
     }
         .Check(request);
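     // No built-in examples are provided above. For illustration only
     // (<address> is a placeholder), a scan could be driven with:
     //     bitcoin-cli scantxoutset start '["addr(<address>)"]'
     //     bitcoin-cli scantxoutset status
     //     bitcoin-cli scantxoutset abort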
 
     RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR});
 
     UniValue result(UniValue::VOBJ);
     if (request.params[0].get_str() == "status") {
         CoinsViewScanReserver reserver;
         if (reserver.reserve()) {
             // no scan in progress
             return NullUniValue;
         }
         result.pushKV("progress", g_scan_progress.load());
         return result;
     } else if (request.params[0].get_str() == "abort") {
         CoinsViewScanReserver reserver;
         if (reserver.reserve()) {
             // reserve was possible which means no scan was running
             return false;
         }
         // set the abort flag
         g_should_abort_scan = true;
         return true;
     } else if (request.params[0].get_str() == "start") {
         CoinsViewScanReserver reserver;
         if (!reserver.reserve()) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 "Scan already in progress, use action \"abort\" or \"status\"");
         }
         std::set<CScript> needles;
         std::map<CScript, std::string> descriptors;
         Amount total_in = Amount::zero();
 
         // loop through the scan objects
         for (const UniValue &scanobject :
              request.params[1].get_array().getValues()) {
             FlatSigningProvider provider;
             auto scripts = EvalDescriptorStringOrObject(scanobject, provider);
             for (const auto &script : scripts) {
                 std::string inferred =
                     InferDescriptor(script, provider)->ToString();
                 needles.emplace(script);
                 descriptors.emplace(std::move(script), std::move(inferred));
             }
         }
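         // A single scan object can expand to several scripts: combo(<pubkey>)
         // yields both P2PK and P2PKH, and a ranged descriptor yields one
         // script per derivation index. Each expanded script gets its own
         // needle and inferred descriptor.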
 
         // Scan the unspent transaction output set for entries matching the
         // needles
         UniValue unspents(UniValue::VARR);
         std::vector<CTxOut> input_txos;
         std::map<COutPoint, Coin> coins;
         g_should_abort_scan = false;
         g_scan_progress = 0;
         int64_t count = 0;
         std::unique_ptr<CCoinsViewCursor> pcursor;
         CBlockIndex *tip;
         {
             LOCK(cs_main);
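             // Flush the in-memory coins cache first: the scan below walks a
             // cursor over pcoinsdbview, which only sees entries that have
             // been written to the database.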
             ::ChainstateActive().ForceFlushStateToDisk();
             pcursor = std::unique_ptr<CCoinsViewCursor>(pcoinsdbview->Cursor());
             CHECK_NONFATAL(pcursor);
             tip = ::ChainActive().Tip();
             CHECK_NONFATAL(tip);
         }
         bool res = FindScriptPubKey(g_scan_progress, g_should_abort_scan, count,
                                     pcursor.get(), needles, coins);
         result.pushKV("success", res);
         result.pushKV("txouts", count);
         result.pushKV("height", tip->nHeight);
         result.pushKV("bestblock", tip->GetBlockHash().GetHex());
 
         for (const auto &it : coins) {
             const COutPoint &outpoint = it.first;
             const Coin &coin = it.second;
             const CTxOut &txo = coin.GetTxOut();
             input_txos.push_back(txo);
             total_in += txo.nValue;
 
             UniValue unspent(UniValue::VOBJ);
             unspent.pushKV("txid", outpoint.GetTxId().GetHex());
             unspent.pushKV("vout", int32_t(outpoint.GetN()));
             unspent.pushKV("scriptPubKey", HexStr(txo.scriptPubKey.begin(),
                                                   txo.scriptPubKey.end()));
             unspent.pushKV("desc", descriptors[txo.scriptPubKey]);
             unspent.pushKV("amount", ValueFromAmount(txo.nValue));
             unspent.pushKV("height", int32_t(coin.GetHeight()));
 
             unspents.push_back(unspent);
         }
         result.pushKV("unspents", unspents);
         result.pushKV("total_amount", ValueFromAmount(total_in));
     } else {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid command");
     }
     return result;
 }
 
 static UniValue getblockfilter(const Config &config,
                                const JSONRPCRequest &request) {
     RPCHelpMan{
         "getblockfilter",
         "Retrieve a BIP 157 content filter for a particular block.\n",
         {
             {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The hash of the block"},
             {"filtertype", RPCArg::Type::STR, /*default*/ "basic",
              "The type name of the filter"},
         },
         RPCResult{"{\n"
                   "  \"filter\" : (string) the hex-encoded filter data\n"
                   "  \"header\" : (string) the hex-encoded filter header\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("getblockfilter",
                                    "\"00000000c937983704a73af28acdec37b049d214a"
                                    "dbda81d7e2a3dd146f6ed09\" \"basic\"")}}
         .Check(request);
 
     const BlockHash block_hash(ParseHashV(request.params[0], "blockhash"));
     std::string filtertype_name = "basic";
     if (!request.params[1].isNull()) {
         filtertype_name = request.params[1].get_str();
     }
 
     BlockFilterType filtertype;
     if (!BlockFilterTypeByName(filtertype_name, filtertype)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Unknown filtertype");
     }
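     // A filter index is only available for this type if it was constructed
     // at startup (typically enabled via the -blockfilterindex option);
     // otherwise GetBlockFilterIndex() returns null and we report the index
     // as not enabled.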
 
     BlockFilterIndex *index = GetBlockFilterIndex(filtertype);
     if (!index) {
         throw JSONRPCError(RPC_MISC_ERROR,
                            "Index is not enabled for filtertype " +
                                filtertype_name);
     }
 
     const CBlockIndex *block_index;
     bool block_was_connected;
     {
         LOCK(cs_main);
         block_index = LookupBlockIndex(block_hash);
         if (!block_index) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
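         // BlockValidity::SCRIPTS means the block's transactions were fully
         // validated, i.e. the block was connected at some point, even if it
         // has since been reorged off the active chain.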
         block_was_connected = block_index->IsValid(BlockValidity::SCRIPTS);
     }
 
     bool index_ready = index->BlockUntilSyncedToCurrentChain();
 
     BlockFilter filter;
     uint256 filter_header;
     if (!index->LookupFilter(block_index, filter) ||
         !index->LookupFilterHeader(block_index, filter_header)) {
         int err_code;
         std::string errmsg = "Filter not found.";
 
         if (!block_was_connected) {
             err_code = RPC_INVALID_ADDRESS_OR_KEY;
             errmsg += " Block was not connected to active chain.";
         } else if (!index_ready) {
             err_code = RPC_MISC_ERROR;
             errmsg +=
                 " Block filters are still in the process of being indexed.";
         } else {
             err_code = RPC_INTERNAL_ERROR;
             errmsg +=
                 " This error is unexpected and indicates index corruption.";
         }
 
         throw JSONRPCError(err_code, errmsg);
     }
 
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("filter", HexStr(filter.GetEncodedFilter()));
     ret.pushKV("header", filter_header.GetHex());
     return ret;
 }
 
 // clang-format off
 static const CRPCCommand commands[] = {
     //  category            name                      actor (function)        argNames
     //  ------------------- ------------------------  ----------------------  ----------
     { "blockchain",         "getbestblockhash",       getbestblockhash,       {} },
     { "blockchain",         "getblock",               getblock,               {"blockhash","verbosity|verbose"} },
     { "blockchain",         "getblockchaininfo",      getblockchaininfo,      {} },
     { "blockchain",         "getblockcount",          getblockcount,          {} },
     { "blockchain",         "getblockhash",           getblockhash,           {"height"} },
     { "blockchain",         "getblockheader",         getblockheader,         {"blockhash","verbose"} },
     { "blockchain",         "getblockstats",          getblockstats,          {"hash_or_height","stats"} },
     { "blockchain",         "getchaintips",           getchaintips,           {} },
     { "blockchain",         "getchaintxstats",        getchaintxstats,        {"nblocks", "blockhash"} },
     { "blockchain",         "getdifficulty",          getdifficulty,          {} },
     { "blockchain",         "getmempoolancestors",    getmempoolancestors,    {"txid","verbose"} },
     { "blockchain",         "getmempooldescendants",  getmempooldescendants,  {"txid","verbose"} },
     { "blockchain",         "getmempoolentry",        getmempoolentry,        {"txid"} },
     { "blockchain",         "getmempoolinfo",         getmempoolinfo,         {} },
     { "blockchain",         "getrawmempool",          getrawmempool,          {"verbose"} },
     { "blockchain",         "gettxout",               gettxout,               {"txid","n","include_mempool"} },
     { "blockchain",         "gettxoutsetinfo",        gettxoutsetinfo,        {} },
     { "blockchain",         "pruneblockchain",        pruneblockchain,        {"height"} },
     { "blockchain",         "savemempool",            savemempool,            {} },
     { "blockchain",         "verifychain",            verifychain,            {"checklevel","nblocks"} },
     { "blockchain",         "preciousblock",          preciousblock,          {"blockhash"} },
     { "blockchain",         "scantxoutset",           scantxoutset,           {"action", "scanobjects"} },
     { "blockchain",         "getblockfilter",         getblockfilter,         {"blockhash", "filtertype"} },
 
     /* Not shown in help */
     { "hidden",             "getfinalizedblockhash",            getfinalizedblockhash,            {} },
     { "hidden",             "finalizeblock",                    finalizeblock,                    {"blockhash"} },
     { "hidden",             "invalidateblock",                  invalidateblock,                  {"blockhash"} },
     { "hidden",             "parkblock",                        parkblock,                        {"blockhash"} },
     { "hidden",             "reconsiderblock",                  reconsiderblock,                  {"blockhash"} },
     { "hidden",             "syncwithvalidationinterfacequeue", syncwithvalidationinterfacequeue, {} },
     { "hidden",             "unparkblock",                      unparkblock,                      {"blockhash"} },
     { "hidden",             "waitfornewblock",                  waitfornewblock,                  {"timeout"} },
     { "hidden",             "waitforblock",                     waitforblock,                     {"blockhash","timeout"} },
     { "hidden",             "waitforblockheight",               waitforblockheight,               {"height","timeout"} },
 };
 // clang-format on
 
 void RegisterBlockchainRPCCommands(CRPCTable &t) {
     for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) {
         t.appendCommand(commands[vcidx].name, &commands[vcidx]);
     }
 }
 
 NodeContext *g_rpc_node = nullptr;
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index d9b140a7f..a2467a36b 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -1,2008 +1,2009 @@
 // Copyright (c) 2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
+#include <blockdb.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <coins.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <core_io.h>
 #include <index/txindex.h>
 #include <key_io.h>
 #include <merkleblock.h>
 #include <node/coin.h>
 #include <node/context.h>
 #include <node/psbt.h>
 #include <node/transaction.h>
 #include <primitives/transaction.h>
 #include <psbt.h>
 #include <random.h>
 #include <rpc/blockchain.h>
 #include <rpc/rawtransaction_util.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <script/script.h>
 #include <script/sign.h>
 #include <script/signingprovider.h>
 #include <script/standard.h>
 #include <txmempool.h>
 #include <uint256.h>
 #include <util/error.h>
 #include <util/moneystr.h>
 #include <util/strencodings.h>
 #include <validation.h>
 #include <validationinterface.h>
 
 #include <cstdint>
 #include <numeric>
 
 #include <univalue.h>
 
 /**
  * High fee for sendrawtransaction and testmempoolaccept.
  * By default, a transaction with a fee higher than this will be rejected by
  * the RPCs. This can be overridden with the maxfeerate argument.
  */
 constexpr static Amount DEFAULT_MAX_RAW_TX_FEE{COIN / 10};
 
 static void TxToJSON(const CTransaction &tx, const BlockHash &hashBlock,
                      UniValue &entry) {
     // Call into TxToUniv() in bitcoin-common to decode the transaction hex.
     //
     // Blockchain contextual information (confirmations and blocktime) is not
     // available to code in bitcoin-common, so we query them here and push the
     // data into the returned UniValue.
     TxToUniv(tx, uint256(), entry, true, RPCSerializationFlags());
 
     if (!hashBlock.IsNull()) {
         LOCK(cs_main);
 
         entry.pushKV("blockhash", hashBlock.GetHex());
         CBlockIndex *pindex = LookupBlockIndex(hashBlock);
         if (pindex) {
             if (::ChainActive().Contains(pindex)) {
                 entry.pushKV("confirmations",
                              1 + ::ChainActive().Height() - pindex->nHeight);
                 entry.pushKV("time", pindex->GetBlockTime());
                 entry.pushKV("blocktime", pindex->GetBlockTime());
             } else {
                 entry.pushKV("confirmations", 0);
             }
         }
     }
 }
 
 static UniValue getrawtransaction(const Config &config,
                                   const JSONRPCRequest &request) {
     RPCHelpMan{
         "getrawtransaction",
         "By default this function only works for mempool transactions. When "
         "called with a blockhash\n"
         "argument, getrawtransaction will return the transaction if the "
         "specified block is available and\n"
         "the transaction is found in that block. When called without a "
         "blockhash argument, getrawtransaction\n"
         "will return the transaction if it is in the mempool, or if -txindex "
         "is enabled and the transaction\n"
         "is in a block in the blockchain.\n"
 
         "\nReturn the raw transaction data.\n"
         "\nIf verbose is 'true', returns an Object with information about "
         "'txid'.\n"
         "If verbose is 'false' or omitted, returns a string that is "
         "serialized, hex-encoded data for 'txid'.\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id"},
             {"verbose", RPCArg::Type::BOOL, /* default */ "false",
              "If false, return a string, otherwise return a json object"},
             {"blockhash", RPCArg::Type::STR_HEX,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "The block in which to look for the transaction"},
         },
         {
             RPCResult{"if verbose is not set or set to false",
                       "\"data\"      (string) The serialized, hex-encoded data "
                       "for 'txid'\n"},
             RPCResult{
                 "if verbose is set to true",
                 "{\n"
                 "  \"in_active_chain\": b, (bool) Whether specified block is "
                 "in the active chain or not (only present with explicit "
                 "\"blockhash\" argument)\n"
                 "  \"hex\" : \"data\",       (string) The serialized, "
                 "hex-encoded data for 'txid'\n"
                 "  \"txid\" : \"id\",        (string) The transaction id (same "
                 "as provided)\n"
                 "  \"hash\" : \"id\",        (string) The transaction hash "
                 "(differs from txid for witness transactions)\n"
                 "  \"size\" : n,             (numeric) The serialized "
                 "transaction size\n"
                 "  \"version\" : n,          (numeric) The version\n"
                 "  \"locktime\" : ttt,       (numeric) The lock time\n"
                 "  \"vin\" : [               (array of json objects)\n"
                 "     {\n"
                 "       \"txid\": \"id\",    (string) The transaction id\n"
                 "       \"vout\": n,         (numeric) \n"
                 "       \"scriptSig\": {     (json object) The script\n"
                 "         \"asm\": \"asm\",  (string) asm\n"
                 "         \"hex\": \"hex\"   (string) hex\n"
                 "       },\n"
                 "       \"sequence\": n      (numeric) The script sequence "
                 "number\n"
                 "     }\n"
                 "     ,...\n"
                 "  ],\n"
                 "  \"vout\" : [              (array of json objects)\n"
                 "     {\n"
                 "       \"value\" : x.xxx,            (numeric) The value in " +
                     CURRENCY_UNIT +
                     "\n"
                     "       \"n\" : n,                    (numeric) index\n"
                     "       \"scriptPubKey\" : {          (json object)\n"
                     "         \"asm\" : \"asm\",          (string) the asm\n"
                     "         \"hex\" : \"hex\",          (string) the hex\n"
                     "         \"reqSigs\" : n,            (numeric) The "
                     "required sigs\n"
                     "         \"type\" : \"pubkeyhash\",  (string) The type, "
                     "eg 'pubkeyhash'\n"
                     "         \"addresses\" : [           (json array of "
                     "string)\n"
                     "           \"address\"        (string) bitcoin address\n"
                     "           ,...\n"
                     "         ]\n"
                     "       }\n"
                     "     }\n"
                     "     ,...\n"
                     "  ],\n"
                     "  \"blockhash\" : \"hash\",   (string) the block hash\n"
                     "  \"confirmations\" : n,      (numeric) The "
                     "confirmations\n"
                     "  \"blocktime\" : ttt         (numeric) The block time "
                     "expressed in " +
                     UNIX_EPOCH_TIME +
                     "\n"
                     "  \"time\" : ttt,             (numeric) Same as "
                     "\"blocktime\"\n"
                     "}\n"},
         },
         RPCExamples{HelpExampleCli("getrawtransaction", "\"mytxid\"") +
                     HelpExampleCli("getrawtransaction", "\"mytxid\" true") +
                     HelpExampleRpc("getrawtransaction", "\"mytxid\", true") +
                     HelpExampleCli("getrawtransaction",
                                    "\"mytxid\" false \"myblockhash\"") +
                     HelpExampleCli("getrawtransaction",
                                    "\"mytxid\" true \"myblockhash\"")},
     }
         .Check(request);
 
     bool in_active_chain = true;
     TxId txid = TxId(ParseHashV(request.params[0], "parameter 1"));
     CBlockIndex *blockindex = nullptr;
 
     const CChainParams &params = config.GetChainParams();
     if (txid == params.GenesisBlock().hashMerkleRoot) {
         // Special exception for the genesis block coinbase transaction
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "The genesis block coinbase is not considered an "
                            "ordinary transaction and cannot be retrieved");
     }
 
     // Accept either a bool (true) or a num (>=1) to indicate verbose output.
     bool fVerbose = false;
     if (!request.params[1].isNull()) {
         fVerbose = request.params[1].isNum()
                        ? (request.params[1].get_int() != 0)
                        : request.params[1].get_bool();
     }
 
     if (!request.params[2].isNull()) {
         LOCK(cs_main);
 
         BlockHash blockhash(ParseHashV(request.params[2], "parameter 3"));
         blockindex = LookupBlockIndex(blockhash);
         if (!blockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Block hash not found");
         }
         in_active_chain = ::ChainActive().Contains(blockindex);
     }
 
     bool f_txindex_ready = false;
     if (g_txindex && !blockindex) {
         f_txindex_ready = g_txindex->BlockUntilSyncedToCurrentChain();
     }
 
     CTransactionRef tx;
     BlockHash hash_block;
     if (!GetTransaction(txid, tx, params.GetConsensus(), hash_block,
                         blockindex)) {
         std::string errmsg;
         if (blockindex) {
             if (!blockindex->nStatus.hasData()) {
                 throw JSONRPCError(RPC_MISC_ERROR, "Block not available");
             }
             errmsg = "No such transaction found in the provided block";
         } else if (!g_txindex) {
             errmsg = "No such mempool transaction. Use -txindex to enable "
                      "blockchain transaction queries";
         } else if (!f_txindex_ready) {
             errmsg = "No such mempool transaction. Blockchain transactions are "
                      "still in the process of being indexed";
         } else {
             errmsg = "No such mempool or blockchain transaction";
         }
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            errmsg +
                                ". Use gettransaction for wallet transactions.");
     }
 
     if (!fVerbose) {
         return EncodeHexTx(*tx, RPCSerializationFlags());
     }
 
     UniValue result(UniValue::VOBJ);
     if (blockindex) {
         result.pushKV("in_active_chain", in_active_chain);
     }
     TxToJSON(*tx, hash_block, result);
     return result;
 }
 
 static UniValue gettxoutproof(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "gettxoutproof",
         "Returns a hex-encoded proof that \"txid\" was included in a block.\n"
         "\nNOTE: By default this function only works sometimes. "
         "This is when there is an\n"
         "unspent output in the utxo for this transaction. To make it always "
         "work,\n"
         "you need to maintain a transaction index, using the -txindex command "
         "line option or\n"
         "specify the block in which the transaction is included manually (by "
         "blockhash).\n",
         {
             {
                 "txids",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of txids to filter",
                 {
                     {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
                      "A transaction hash"},
                 },
             },
             {"blockhash", RPCArg::Type::STR_HEX,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "If specified, looks for txid in the block with this hash"},
         },
         RPCResult{"\"data\"           (string) A string that is a serialized, "
                   "hex-encoded data for the proof.\n"},
         RPCExamples{""},
     }
         .Check(request);
 
     std::set<TxId> setTxIds;
     TxId oneTxId;
     UniValue txids = request.params[0].get_array();
     for (unsigned int idx = 0; idx < txids.size(); idx++) {
         const UniValue &utxid = txids[idx];
         TxId txid(ParseHashV(utxid, "txid"));
         if (setTxIds.count(txid)) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 std::string("Invalid parameter, duplicated txid: ") +
                     utxid.get_str());
         }
 
         setTxIds.insert(txid);
         oneTxId = txid;
     }
 
     CBlockIndex *pblockindex = nullptr;
 
     BlockHash hashBlock;
     if (!request.params[1].isNull()) {
         LOCK(cs_main);
         hashBlock = BlockHash(ParseHashV(request.params[1], "blockhash"));
         pblockindex = LookupBlockIndex(hashBlock);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     } else {
         LOCK(cs_main);
         // Loop through txids and try to find which block they're in. Exit loop
         // once a block is found.
         for (const auto &txid : setTxIds) {
             const Coin &coin = AccessByTxid(*pcoinsTip, txid);
             if (!coin.IsSpent()) {
                 pblockindex = ::ChainActive()[coin.GetHeight()];
                 break;
             }
         }
     }
 
     // Allow txindex to catch up if we need to query it and before we acquire
     // cs_main.
     if (g_txindex && !pblockindex) {
         g_txindex->BlockUntilSyncedToCurrentChain();
     }
 
     const Consensus::Params &params = config.GetChainParams().GetConsensus();
 
     LOCK(cs_main);
 
     if (pblockindex == nullptr) {
         CTransactionRef tx;
         if (!GetTransaction(oneTxId, tx, params, hashBlock) ||
             hashBlock.IsNull()) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Transaction not yet in block");
         }
 
         pblockindex = LookupBlockIndex(hashBlock);
         if (!pblockindex) {
             throw JSONRPCError(RPC_INTERNAL_ERROR, "Transaction index corrupt");
         }
     }
 
     CBlock block;
     if (!ReadBlockFromDisk(block, pblockindex, params)) {
         throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk");
     }
 
     unsigned int ntxFound = 0;
     for (const auto &tx : block.vtx) {
         if (setTxIds.count(tx->GetId())) {
             ntxFound++;
         }
     }
 
     if (ntxFound != setTxIds.size()) {
         throw JSONRPCError(
             RPC_INVALID_ADDRESS_OR_KEY,
             "Not all transactions found in specified or retrieved block");
     }
 
     CDataStream ssMB(SER_NETWORK, PROTOCOL_VERSION);
     CMerkleBlock mb(block, setTxIds);
     ssMB << mb;
     std::string strHex = HexStr(ssMB.begin(), ssMB.end());
     return strHex;
 }
 
 static UniValue verifytxoutproof(const Config &config,
                                  const JSONRPCRequest &request) {
     RPCHelpMan{
         "verifytxoutproof",
         "Verifies that a proof points to a transaction in a block, returning "
         "the transaction it commits to\n"
         "and throwing an RPC error if the block is not in our best chain\n",
         {
             {"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The hex-encoded proof generated by gettxoutproof"},
         },
         RPCResult{
             "[\"txid\"]      (array, strings) The txid(s) which the proof "
             "commits to, or empty array if the proof can not be validated.\n"},
         RPCExamples{""},
     }
         .Check(request);
 
     CDataStream ssMB(ParseHexV(request.params[0], "proof"), SER_NETWORK,
                      PROTOCOL_VERSION);
     CMerkleBlock merkleBlock;
     ssMB >> merkleBlock;
 
     UniValue res(UniValue::VARR);
 
     std::vector<uint256> vMatch;
     std::vector<size_t> vIndex;
     if (merkleBlock.txn.ExtractMatches(vMatch, vIndex) !=
         merkleBlock.header.hashMerkleRoot) {
         return res;
     }
 
     LOCK(cs_main);
 
     const CBlockIndex *pindex = LookupBlockIndex(merkleBlock.header.GetHash());
     if (!pindex || !::ChainActive().Contains(pindex) || pindex->nTx == 0) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Block not found in chain");
     }
 
     // Check if proof is valid, only add results if so
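     // A correct proof must commit to exactly as many transactions as the
     // block contains; a mismatch means the partial merkle tree was built
     // over a different transaction set, so no txids are returned.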
     if (pindex->nTx == merkleBlock.txn.GetNumTransactions()) {
         for (const uint256 &hash : vMatch) {
             res.push_back(hash.GetHex());
         }
     }
 
     return res;
 }
 
 static UniValue createrawtransaction(const Config &config,
                                      const JSONRPCRequest &request) {
     RPCHelpMan{
         "createrawtransaction",
         "Create a transaction spending the given inputs and creating new "
         "outputs.\n"
         "Outputs can be addresses or data.\n"
         "Returns hex-encoded raw transaction.\n"
         "Note that the transaction's inputs are not signed, and\n"
         "it is not stored in the wallet or transmitted to the network.\n",
         {
             {
                 "inputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of json objects",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                             {"sequence", RPCArg::Type::NUM, /* default */
                              "depends on the value of the 'locktime' argument",
                              "The sequence number"},
                         },
                     },
                 },
             },
             {
                 "outputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "a json array with outputs (key-value pairs), where none of "
                 "the keys are duplicated.\n"
                 "That is, each address can only appear once and there can only "
                 "be one 'data' object.\n"
                 "For compatibility reasons, a dictionary, which holds the "
                 "key-value pairs directly, is also\n"
                 "                             accepted as second parameter.",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"address", RPCArg::Type::AMOUNT,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key (string) is the "
                              "bitcoin address, the value (float or string) is "
                              "the amount in " +
                                  CURRENCY_UNIT},
                         },
                     },
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"data", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key must be \"data\", the "
                              "value is hex-encoded data"},
                         },
                     },
                 },
             },
             {"locktime", RPCArg::Type::NUM, /* default */ "0",
              "Raw locktime. Non-0 value also locktime-activates inputs"},
         },
         RPCResult{"\"transaction\"              (string) hex string of the "
                   "transaction\n"},
         RPCExamples{
             HelpExampleCli("createrawtransaction",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                            "\" \"[{\\\"address\\\":0.01}]\"") +
             HelpExampleCli("createrawtransaction",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                            "\" \"[{\\\"data\\\":\\\"00010203\\\"}]\"") +
             HelpExampleRpc("createrawtransaction",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                            "\", \"[{\\\"address\\\":0.01}]\"") +
             HelpExampleRpc("createrawtransaction",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                            "\", \"[{\\\"data\\\":\\\"00010203\\\"}]\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params,
                  {UniValue::VARR,
                   UniValueType(), // ARR or OBJ, checked later
                   UniValue::VNUM},
                  true);
 
     CMutableTransaction rawTx =
         ConstructTransaction(config.GetChainParams(), request.params[0],
                              request.params[1], request.params[2]);
 
     return EncodeHexTx(CTransaction(rawTx));
 }
 
 static UniValue decoderawtransaction(const Config &config,
                                      const JSONRPCRequest &request) {
     RPCHelpMan{
         "decoderawtransaction",
         "Return a JSON object representing the serialized, hex-encoded "
         "transaction.\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction hex string"},
         },
         RPCResult{
             "{\n"
             "  \"txid\" : \"id\",        (string) The transaction id\n"
             "  \"hash\" : \"id\",        (string) The transaction hash "
             "(differs from txid for witness transactions)\n"
             "  \"size\" : n,             (numeric) The transaction size\n"
             "  \"version\" : n,          (numeric) The version\n"
             "  \"locktime\" : ttt,       (numeric) The lock time\n"
             "  \"vin\" : [               (array of json objects)\n"
             "     {\n"
             "       \"txid\": \"id\",    (string) The transaction id\n"
             "       \"vout\": n,         (numeric) The output number\n"
             "       \"scriptSig\": {     (json object) The script\n"
             "         \"asm\": \"asm\",  (string) asm\n"
             "         \"hex\": \"hex\"   (string) hex\n"
             "       },\n"
             "       \"sequence\": n     (numeric) The script sequence number\n"
             "     }\n"
             "     ,...\n"
             "  ],\n"
             "  \"vout\" : [             (array of json objects)\n"
             "     {\n"
             "       \"value\" : x.xxx,            (numeric) The value in " +
             CURRENCY_UNIT +
             "\n"
             "       \"n\" : n,                    (numeric) index\n"
             "       \"scriptPubKey\" : {          (json object)\n"
             "         \"asm\" : \"asm\",          (string) the asm\n"
             "         \"hex\" : \"hex\",          (string) the hex\n"
             "         \"reqSigs\" : n,            (numeric) The required sigs\n"
             "         \"type\" : \"pubkeyhash\",  (string) The type, eg "
             "'pubkeyhash'\n"
             "         \"addresses\" : [           (json array of string)\n"
             "           \"12tvKAXCxZjSmdNbao16dKXC8tRWfcF5oc\"   (string) "
             "bitcoin address\n"
             "           ,...\n"
             "         ]\n"
             "       }\n"
             "     }\n"
             "     ,...\n"
             "  ],\n"
             "}\n"},
         RPCExamples{HelpExampleCli("decoderawtransaction", "\"hexstring\"") +
                     HelpExampleRpc("decoderawtransaction", "\"hexstring\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR});
 
     CMutableTransaction mtx;
 
     if (!DecodeHexTx(mtx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     UniValue result(UniValue::VOBJ);
     TxToUniv(CTransaction(std::move(mtx)), uint256(), result, false);
 
     return result;
 }
 
 static UniValue decodescript(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "decodescript",
         "Decode a hex-encoded script.\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "the hex-encoded script"},
         },
         RPCResult{"{\n"
                   "  \"asm\":\"asm\",   (string) Script public key\n"
                   "  \"hex\":\"hex\",   (string) hex-encoded public key\n"
                   "  \"type\":\"type\", (string) The output type\n"
                   "  \"reqSigs\": n,    (numeric) The required signatures\n"
                   "  \"addresses\": [   (json array of string)\n"
                   "     \"address\"     (string) bitcoin address\n"
                   "     ,...\n"
                   "  ],\n"
                   "  \"p2sh\",\"address\" (string) address of P2SH script "
                   "wrapping this redeem script (not returned if the script is "
                   "already a P2SH).\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("decodescript", "\"hexstring\"") +
                     HelpExampleRpc("decodescript", "\"hexstring\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR});
 
     UniValue r(UniValue::VOBJ);
     CScript script;
     if (request.params[0].get_str().size() > 0) {
         std::vector<uint8_t> scriptData(
             ParseHexV(request.params[0], "argument"));
         script = CScript(scriptData.begin(), scriptData.end());
     } else {
         // Empty scripts are valid.
     }
 
     ScriptPubKeyToUniv(script, r, false);
 
     UniValue type;
     type = find_value(r, "type");
 
     if (type.isStr() && type.get_str() != "scripthash") {
         // P2SH cannot be wrapped in a P2SH. If this script is already a P2SH,
         // don't return the address for a P2SH of the P2SH.
         r.pushKV("p2sh", EncodeDestination(ScriptHash(script), config));
     }
 
     return r;
 }
 
 static UniValue combinerawtransaction(const Config &config,
                                       const JSONRPCRequest &request) {
     RPCHelpMan{
         "combinerawtransaction",
         "Combine multiple partially signed transactions into one "
         "transaction.\n"
         "The combined transaction may be another partially signed transaction "
         "or a \n"
         "fully signed transaction.",
         {
             {
                 "txs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of hex strings of partially signed "
                 "transactions",
                 {
                     {"hexstring", RPCArg::Type::STR_HEX,
                      RPCArg::Optional::OMITTED,
                      "A hex-encoded raw transaction"},
                 },
             },
         },
         RPCResult{"\"hex\"            (string) The hex-encoded raw transaction "
                   "with signature(s)\n"},
         RPCExamples{HelpExampleCli("combinerawtransaction",
                                    "[\"myhex1\", \"myhex2\", \"myhex3\"]")},
     }
         .Check(request);
 
     UniValue txs = request.params[0].get_array();
     std::vector<CMutableTransaction> txVariants(txs.size());
 
     for (unsigned int idx = 0; idx < txs.size(); idx++) {
         if (!DecodeHexTx(txVariants[idx], txs[idx].get_str())) {
             throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                strprintf("TX decode failed for tx %d", idx));
         }
     }
 
     if (txVariants.empty()) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Missing transactions");
     }
 
     // mergedTx will end up with all the signatures; it
     // starts as a clone of the rawtx:
     CMutableTransaction mergedTx(txVariants[0]);
 
     // Fetch previous transactions (inputs):
     CCoinsView viewDummy;
     CCoinsViewCache view(&viewDummy);
     {
         LOCK(cs_main);
         LOCK(g_mempool.cs);
         CCoinsViewCache &viewChain = *pcoinsTip;
         CCoinsViewMemPool viewMempool(&viewChain, g_mempool);
         // temporarily switch cache backend to db+mempool view
         view.SetBackend(viewMempool);
 
         for (const CTxIn &txin : mergedTx.vin) {
             // Load entries from viewChain into view; can fail.
             view.AccessCoin(txin.prevout);
         }
 
         // switch back to avoid locking mempool for too long
         view.SetBackend(viewDummy);
     }
 
     // Use CTransaction for the constant parts of the
     // transaction to avoid rehashing.
     const CTransaction txConst(mergedTx);
     // Sign what we can:
     for (size_t i = 0; i < mergedTx.vin.size(); i++) {
         CTxIn &txin = mergedTx.vin[i];
         const Coin &coin = view.AccessCoin(txin.prevout);
         if (coin.IsSpent()) {
             throw JSONRPCError(RPC_VERIFY_ERROR,
                                "Input not found or already spent");
         }
         SignatureData sigdata;
 
         const CTxOut &txout = coin.GetTxOut();
 
         // ... and merge in other signatures:
         for (const CMutableTransaction &txv : txVariants) {
             if (txv.vin.size() > i) {
                 sigdata.MergeSignatureData(DataFromTransaction(txv, i, txout));
             }
         }
         ProduceSignature(
             DUMMY_SIGNING_PROVIDER,
             MutableTransactionSignatureCreator(&mergedTx, i, txout.nValue),
             txout.scriptPubKey, sigdata);
 
         UpdateInput(txin, sigdata);
     }
 
     return EncodeHexTx(CTransaction(mergedTx));
 }
 
 static UniValue signrawtransactionwithkey(const Config &config,
                                           const JSONRPCRequest &request) {
     RPCHelpMan{
         "signrawtransactionwithkey",
         "Sign inputs for raw transaction (serialized, hex-encoded).\n"
         "The second argument is an array of base58-encoded private\n"
         "keys that will be the only keys used to sign the transaction.\n"
         "The third optional argument (may be null) is an array of previous "
         "transaction outputs that\n"
         "this transaction depends on but may not yet be in the block chain.\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction hex string"},
             {
                 "privkeys",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of base58-encoded private keys for signing",
                 {
                     {"privatekey", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                      "private key in base58-encoding"},
                 },
             },
             {
                 "prevtxs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::OMITTED_NAMED_ARG,
                 "A json array of previous dependent transaction outputs",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                             {"scriptPubKey", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "script key"},
                             {"redeemScript", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::OMITTED,
                              "(required for P2SH) redeem script"},
                             {"amount", RPCArg::Type::AMOUNT,
                              RPCArg::Optional::NO, "The amount spent"},
                         },
                     },
                 },
             },
             {"sighashtype", RPCArg::Type::STR, /* default */ "ALL|FORKID",
              "The signature hash type. Must be one of:\n"
              "       \"ALL|FORKID\"\n"
              "       \"NONE|FORKID\"\n"
              "       \"SINGLE|FORKID\"\n"
              "       \"ALL|FORKID|ANYONECANPAY\"\n"
              "       \"NONE|FORKID|ANYONECANPAY\"\n"
              "       \"SINGLE|FORKID|ANYONECANPAY\""},
         },
         RPCResult{
             "{\n"
             "  \"hex\" : \"value\",         (string) The hex-encoded raw "
             "transaction with signature(s)\n"
             "  \"complete\" : true|false,   (boolean) If the transaction has a "
             "complete set of signatures\n"
             "  \"errors\" : [               (json array of objects) Script "
             "verification errors (if there are any)\n"
             "    {\n"
             "      \"txid\" : \"hash\",     (string) The hash of the "
             "referenced, previous transaction\n"
             "      \"vout\" : n,            (numeric) The index of the output "
             "to spent and used as input\n"
             "      \"scriptSig\" : \"hex\", (string) The hex-encoded signature "
             "script\n"
             "      \"sequence\" : n,        (numeric) Script sequence number\n"
             "      \"error\" : \"text\"     (string) Verification or signing "
             "error related to the input\n"
             "    }\n"
             "    ,...\n"
             "  ]\n"
             "}\n"},
         RPCExamples{
             HelpExampleCli("signrawtransactionwithkey",
                            "\"myhex\" \"[\\\"key1\\\",\\\"key2\\\"]\"") +
             HelpExampleRpc("signrawtransactionwithkey",
                            "\"myhex\", \"[\\\"key1\\\",\\\"key2\\\"]\"")},
     }
         .Check(request);
 
     RPCTypeCheck(
         request.params,
         {UniValue::VSTR, UniValue::VARR, UniValue::VARR, UniValue::VSTR}, true);
 
     CMutableTransaction mtx;
     if (!DecodeHexTx(mtx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     FillableSigningProvider keystore;
     const UniValue &keys = request.params[1].get_array();
     for (size_t idx = 0; idx < keys.size(); ++idx) {
         UniValue k = keys[idx];
         CKey key = DecodeSecret(k.get_str());
         if (!key.IsValid()) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Invalid private key");
         }
         keystore.AddKey(key);
     }
 
     // Fetch previous transactions (inputs):
     std::map<COutPoint, Coin> coins;
     for (const CTxIn &txin : mtx.vin) {
         // Create empty map entry keyed by prevout.
         coins[txin.prevout];
     }
     FindCoins(coins);
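     // FindCoins fills in the entries it can locate in the mempool or the
     // UTXO set; anything it cannot find is left empty and may still be
     // provided through the prevtxs argument below.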
 
     // Parse the prevtxs array
     ParsePrevouts(request.params[2], &keystore, coins);
 
     UniValue result(UniValue::VOBJ);
     SignTransaction(mtx, &keystore, coins, request.params[3], result);
     return result;
 }
 
 static UniValue sendrawtransaction(const Config &config,
                                    const JSONRPCRequest &request) {
     RPCHelpMan{
         "sendrawtransaction",
         "Submits raw transaction (serialized, hex-encoded) to local node and "
         "network.\n"
         "\nAlso see createrawtransaction and "
         "signrawtransactionwithkey calls.\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The hex string of the raw transaction"},
             {"maxfeerate", RPCArg::Type::AMOUNT,
              /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE),
              "Reject transactions whose fee rate is higher than the specified "
              "value, expressed in " +
                  CURRENCY_UNIT + "/kB\n"},
         },
         RPCResult{"\"hex\"             (string) The transaction hash in hex\n"},
         RPCExamples{
             "\nCreate a transaction\n" +
             HelpExampleCli(
                 "createrawtransaction",
                 "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" "
                 "\"{\\\"myaddress\\\":0.01}\"") +
             "Sign the transaction, and get back the hex\n" +
             HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") +
             "\nSend the transaction (signed hex)\n" +
             HelpExampleCli("sendrawtransaction", "\"signedhex\"") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("sendrawtransaction", "\"signedhex\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {
                                      UniValue::VSTR,
                                      // NUM or BOOL, checked later
                                      UniValueType(),
                                  });
 
     // parse hex string from parameter
     CMutableTransaction mtx;
     if (!DecodeHexTx(mtx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
 
     Amount max_raw_tx_fee = DEFAULT_MAX_RAW_TX_FEE;
     // TODO: temporary migration code for old clients. Remove in v0.22
     if (request.params[1].isBool()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "Second argument must be numeric (maxfeerate) and "
                            "no longer supports a boolean. To allow a "
                            "transaction with high fees, set maxfeerate to 0.");
     } else if (!request.params[1].isNull()) {
         size_t sz = tx->GetTotalSize();
         CFeeRate fr(AmountFromValue(request.params[1]));
         max_raw_tx_fee = fr.GetFee(sz);
     }
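     // The absolute fee cap is the requested fee rate applied to the
     // serialized transaction size; a maxfeerate of 0 disables the check.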
     std::string err_string;
     AssertLockNotHeld(cs_main);
     const TransactionError err = BroadcastTransaction(
         *g_rpc_node, config, tx, err_string, max_raw_tx_fee, /*relay*/ true,
         /*wait_callback*/ true);
     if (err != TransactionError::OK) {
         throw JSONRPCTransactionError(err, err_string);
     }
 
     return tx->GetHash().GetHex();
 }
 
 static UniValue testmempoolaccept(const Config &config,
                                   const JSONRPCRequest &request) {
     RPCHelpMan{
         "testmempoolaccept",
         "Returns if raw transaction (serialized, hex-encoded) would be "
         "accepted by mempool.\n"
         "\nThis checks if the transaction violates the consensus or policy "
         "rules.\n"
         "\nSee sendrawtransaction call.\n",
         {
             {
                 "rawtxs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "An array of hex strings of raw transactions.\n"
                 "                             Length must be one for now.",
                 {
                     {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
                      ""},
                 },
             },
             {"maxfeerate", RPCArg::Type::AMOUNT,
              /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE),
              "Reject transactions whose fee rate is higher than the specified "
              "value, expressed in " +
                  CURRENCY_UNIT + "/kB\n"},
         },
         RPCResult{
             "[                   (array) The result of the mempool acceptance "
             "test for each raw transaction in the input array.\n"
             "                            Length is exactly one for now.\n"
             " {\n"
             "  \"txid\"          (string) The transaction hash in hex\n"
             "  \"allowed\"       (boolean) If the mempool allows this tx to be "
             "inserted\n"
             "  \"reject-reason\" (string) Rejection string (only present when "
             "'allowed' is false)\n"
             " }\n"
             "]\n"},
         RPCExamples{
             "\nCreate a transaction\n" +
             HelpExampleCli(
                 "createrawtransaction",
                 "\"[{\\\"txid\\\" : \\\"mytxid\\\",\\\"vout\\\":0}]\" "
                 "\"{\\\"myaddress\\\":0.01}\"") +
             "Sign the transaction, and get back the hex\n" +
             HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") +
             "\nTest acceptance of the transaction (signed hex)\n" +
             HelpExampleCli("testmempoolaccept", "[\"signedhex\"]") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("testmempoolaccept", "[\"signedhex\"]")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {
                                      UniValue::VARR,
                                      // NUM or BOOL, checked later
                                      UniValueType(),
                                  });
 
     if (request.params[0].get_array().size() != 1) {
         throw JSONRPCError(
             RPC_INVALID_PARAMETER,
             "Array must contain exactly one raw transaction for now");
     }
 
     CMutableTransaction mtx;
     if (!DecodeHexTx(mtx, request.params[0].get_array()[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
     CTransactionRef tx(MakeTransactionRef(std::move(mtx)));
     const TxId &txid = tx->GetId();
 
     Amount max_raw_tx_fee = DEFAULT_MAX_RAW_TX_FEE;
     // TODO: temporary migration code for old clients. Remove in v0.20
     if (request.params[1].isBool()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "Second argument must be numeric (maxfeerate) and "
                            "no longer supports a boolean. To allow a "
                            "transaction with high fees, set maxfeerate to 0.");
     } else if (!request.params[1].isNull()) {
         size_t sz = tx->GetTotalSize();
         CFeeRate fr(AmountFromValue(request.params[1]));
         max_raw_tx_fee = fr.GetFee(sz);
     }
 
     UniValue result(UniValue::VARR);
     UniValue result_0(UniValue::VOBJ);
     result_0.pushKV("txid", txid.GetHex());
 
     TxValidationState state;
     bool test_accept_res;
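     // With test_accept set, AcceptToMemoryPool runs the usual policy and
     // consensus checks but never actually adds the transaction to the pool.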
     {
         LOCK(cs_main);
         test_accept_res = AcceptToMemoryPool(
             config, g_mempool, state, std::move(tx), false /* bypass_limits */,
             max_raw_tx_fee, true /* test_accept */);
     }
     result_0.pushKV("allowed", test_accept_res);
     if (!test_accept_res) {
         if (state.IsInvalid()) {
             if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
                 result_0.pushKV("reject-reason", "missing-inputs");
             } else {
                 result_0.pushKV("reject-reason",
                                 strprintf("%i: %s", state.GetRejectCode(),
                                           state.GetRejectReason()));
             }
         } else {
             result_0.pushKV("reject-reason", state.GetRejectReason());
         }
     }
 
     result.push_back(std::move(result_0));
     return result;
 }
 
 static std::string WriteHDKeypath(std::vector<uint32_t> &keypath) {
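     // Renders a BIP32 keypath as text; the high bit of each element marks a
     // hardened derivation, e.g. {0x8000002C, 0} becomes "m/44'/0".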
     std::string keypath_str = "m";
     for (uint32_t num : keypath) {
         keypath_str += "/";
         bool hardened = false;
         if (num & 0x80000000) {
             hardened = true;
             num &= ~0x80000000;
         }
 
         keypath_str += std::to_string(num);
         if (hardened) {
             keypath_str += "'";
         }
     }
     return keypath_str;
 }
 
 static UniValue decodepsbt(const Config &config,
                            const JSONRPCRequest &request) {
     RPCHelpMan{
         "decodepsbt",
         "Return a JSON object representing the serialized, base64-encoded "
         "partially signed Bitcoin transaction.\n",
         {
             {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The PSBT base64 string"},
         },
         RPCResult{
             "{\n"
             "  \"tx\" : {                   (json object) The decoded "
             "network-serialized unsigned transaction.\n"
             "    ...                                      The layout is the "
             "same as the output of decoderawtransaction.\n"
             "  },\n"
             "  \"unknown\" : {                (json object) The unknown global "
             "fields\n"
             "    \"key\" : \"value\"            (key-value pair) An unknown "
             "key-value pair\n"
             "     ...\n"
             "  },\n"
             "  \"inputs\" : [                 (array of json objects)\n"
             "    {\n"
             "      \"utxo\" : {            (json object, optional) Transaction "
             "output for UTXOs\n"
             "        \"amount\" : x.xxx,           (numeric) The value in " +
             CURRENCY_UNIT +
             "\n"
             "        \"scriptPubKey\" : {          (json object)\n"
             "          \"asm\" : \"asm\",            (string) The asm\n"
             "          \"hex\" : \"hex\",            (string) The hex\n"
             "          \"type\" : \"pubkeyhash\",    (string) The type, eg "
             "'pubkeyhash'\n"
             "          \"address\" : \"address\"     (string) Bitcoin address "
             "if there is one\n"
             "        }\n"
             "      },\n"
             "      \"partial_signatures\" : {             (json object, "
             "optional)\n"
             "        \"pubkey\" : \"signature\",           (string) The public "
             "key and signature that corresponds to it.\n"
             "        ,...\n"
             "      }\n"
             "      \"sighash\" : \"type\",                  (string, optional) "
             "The sighash type to be used\n"
             "      \"redeem_script\" : {       (json object, optional)\n"
             "          \"asm\" : \"asm\",            (string) The asm\n"
             "          \"hex\" : \"hex\",            (string) The hex\n"
             "          \"type\" : \"pubkeyhash\",    (string) The type, eg "
             "'pubkeyhash'\n"
             "        }\n"
             "      \"bip32_derivs\" : {          (json object, optional)\n"
             "        \"pubkey\" : {                     (json object, "
             "optional) The public key with the derivation path as the value.\n"
             "          \"master_fingerprint\" : \"fingerprint\"     (string) "
             "The fingerprint of the master key\n"
             "          \"path\" : \"path\",                         (string) "
             "The path\n"
             "        }\n"
             "        ,...\n"
             "      }\n"
             "      \"final_scriptsig\" : {       (json object, optional)\n"
             "          \"asm\" : \"asm\",            (string) The asm\n"
             "          \"hex\" : \"hex\",            (string) The hex\n"
             "        }\n"
             "      \"unknown\" : {                (json object) The unknown "
             "global fields\n"
             "        \"key\" : \"value\"            (key-value pair) An "
             "unknown key-value pair\n"
             "         ...\n"
             "      },\n"
             "    }\n"
             "    ,...\n"
             "  ]\n"
             "  \"outputs\" : [                 (array of json objects)\n"
             "    {\n"
             "      \"redeem_script\" : {       (json object, optional)\n"
             "          \"asm\" : \"asm\",            (string) The asm\n"
             "          \"hex\" : \"hex\",            (string) The hex\n"
             "          \"type\" : \"pubkeyhash\",    (string) The type, eg "
             "'pubkeyhash'\n"
             "        }\n"
             "      \"bip32_derivs\" : [          (array of json objects, "
             "optional)\n"
             "        {\n"
             "          \"pubkey\" : \"pubkey\",                     (string) "
             "The public key this path corresponds to\n"
             "          \"master_fingerprint\" : \"fingerprint\"     (string) "
             "The fingerprint of the master key\n"
             "          \"path\" : \"path\",                         (string) "
             "The path\n"
             "          }\n"
             "        }\n"
             "        ,...\n"
             "      ],\n"
             "      \"unknown\" : {                (json object) The unknown "
             "global fields\n"
             "        \"key\" : \"value\"            (key-value pair) An "
             "unknown key-value pair\n"
             "         ...\n"
             "      },\n"
             "    }\n"
             "    ,...\n"
             "  ]\n"
             "  \"fee\" : fee                      (numeric, optional) The "
             "transaction fee paid if all UTXOs slots in the PSBT have been "
             "filled.\n"
             "}\n"},
         RPCExamples{HelpExampleCli("decodepsbt", "\"psbt\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR});
 
     // Unserialize the transaction
     PartiallySignedTransaction psbtx;
     std::string error;
     if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                            strprintf("TX decode failed %s", error));
     }
 
     UniValue result(UniValue::VOBJ);
 
     // Add the decoded tx
     UniValue tx_univ(UniValue::VOBJ);
     TxToUniv(CTransaction(*psbtx.tx), uint256(), tx_univ, false);
     result.pushKV("tx", tx_univ);
 
     // Unknown data
     if (psbtx.unknown.size() > 0) {
         UniValue unknowns(UniValue::VOBJ);
         for (auto entry : psbtx.unknown) {
             unknowns.pushKV(HexStr(entry.first), HexStr(entry.second));
         }
         result.pushKV("unknown", unknowns);
     }
 
     // inputs
     Amount total_in = Amount::zero();
     bool have_all_utxos = true;
     UniValue inputs(UniValue::VARR);
     for (size_t i = 0; i < psbtx.inputs.size(); ++i) {
         const PSBTInput &input = psbtx.inputs[i];
         UniValue in(UniValue::VOBJ);
         // UTXOs
         if (!input.utxo.IsNull()) {
             const CTxOut &txout = input.utxo;
 
             UniValue out(UniValue::VOBJ);
 
             out.pushKV("amount", ValueFromAmount(txout.nValue));
             total_in += txout.nValue;
 
             UniValue o(UniValue::VOBJ);
             ScriptToUniv(txout.scriptPubKey, o, true);
             out.pushKV("scriptPubKey", o);
             in.pushKV("utxo", out);
         } else {
             have_all_utxos = false;
         }
 
         // Partial sigs
         if (!input.partial_sigs.empty()) {
             UniValue partial_sigs(UniValue::VOBJ);
             for (const auto &sig : input.partial_sigs) {
                 partial_sigs.pushKV(HexStr(sig.second.first),
                                     HexStr(sig.second.second));
             }
             in.pushKV("partial_signatures", partial_sigs);
         }
 
         // Sighash
         uint8_t sighashbyte = input.sighash_type.getRawSigHashType() & 0xff;
         if (sighashbyte > 0) {
             in.pushKV("sighash", SighashToStr(sighashbyte));
         }
 
         // Redeem script
         if (!input.redeem_script.empty()) {
             UniValue r(UniValue::VOBJ);
             ScriptToUniv(input.redeem_script, r, false);
             in.pushKV("redeem_script", r);
         }
 
         // keypaths
         if (!input.hd_keypaths.empty()) {
             UniValue keypaths(UniValue::VARR);
             for (auto entry : input.hd_keypaths) {
                 UniValue keypath(UniValue::VOBJ);
                 keypath.pushKV("pubkey", HexStr(entry.first));
 
                 keypath.pushKV(
                     "master_fingerprint",
                     strprintf("%08x", ReadBE32(entry.second.fingerprint)));
                 keypath.pushKV("path", WriteHDKeypath(entry.second.path));
                 keypaths.push_back(keypath);
             }
             in.pushKV("bip32_derivs", keypaths);
         }
 
         // Final scriptSig
         if (!input.final_script_sig.empty()) {
             UniValue scriptsig(UniValue::VOBJ);
             scriptsig.pushKV("asm",
                              ScriptToAsmStr(input.final_script_sig, true));
             scriptsig.pushKV("hex", HexStr(input.final_script_sig));
             in.pushKV("final_scriptSig", scriptsig);
         }
 
         // Unknown data
         if (input.unknown.size() > 0) {
             UniValue unknowns(UniValue::VOBJ);
             for (auto entry : input.unknown) {
                 unknowns.pushKV(HexStr(entry.first), HexStr(entry.second));
             }
             in.pushKV("unknown", unknowns);
         }
 
         inputs.push_back(in);
     }
     result.pushKV("inputs", inputs);
 
     // outputs
     Amount output_value = Amount::zero();
     UniValue outputs(UniValue::VARR);
     for (size_t i = 0; i < psbtx.outputs.size(); ++i) {
         const PSBTOutput &output = psbtx.outputs[i];
         UniValue out(UniValue::VOBJ);
         // Redeem script
         if (!output.redeem_script.empty()) {
             UniValue r(UniValue::VOBJ);
             ScriptToUniv(output.redeem_script, r, false);
             out.pushKV("redeem_script", r);
         }
 
         // keypaths
         if (!output.hd_keypaths.empty()) {
             UniValue keypaths(UniValue::VARR);
             for (auto entry : output.hd_keypaths) {
                 UniValue keypath(UniValue::VOBJ);
                 keypath.pushKV("pubkey", HexStr(entry.first));
                 keypath.pushKV(
                     "master_fingerprint",
                     strprintf("%08x", ReadBE32(entry.second.fingerprint)));
                 keypath.pushKV("path", WriteHDKeypath(entry.second.path));
                 keypaths.push_back(keypath);
             }
             out.pushKV("bip32_derivs", keypaths);
         }
 
         // Unknown data
         if (output.unknown.size() > 0) {
             UniValue unknowns(UniValue::VOBJ);
             for (auto entry : output.unknown) {
                 unknowns.pushKV(HexStr(entry.first), HexStr(entry.second));
             }
             out.pushKV("unknown", unknowns);
         }
 
         outputs.push_back(out);
 
         // Fee calculation
         output_value += psbtx.tx->vout[i].nValue;
     }
     result.pushKV("outputs", outputs);
     if (have_all_utxos) {
         result.pushKV("fee", ValueFromAmount(total_in - output_value));
     }
 
     return result;
 }
 
 static UniValue combinepsbt(const Config &config,
                             const JSONRPCRequest &request) {
     RPCHelpMan{
         "combinepsbt",
         "Combine multiple partially signed Bitcoin transactions into one "
         "transaction.\n"
         "Implements the Combiner role.\n",
         {
             {
                 "txs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of base64 strings of partially signed "
                 "transactions",
                 {
                     {"psbt", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                      "A base64 string of a PSBT"},
                 },
             },
         },
         RPCResult{"  \"psbt\"          (string) The base64-encoded partially "
                   "signed transaction\n"},
         RPCExamples{HelpExampleCli(
             "combinepsbt", "[\"mybase64_1\", \"mybase64_2\", \"mybase64_3\"]")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VARR}, true);
 
     // Unserialize the transactions
     std::vector<PartiallySignedTransaction> psbtxs;
     UniValue txs = request.params[0].get_array();
     if (txs.empty()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "Parameter 'txs' cannot be empty");
     }
     for (size_t i = 0; i < txs.size(); ++i) {
         PartiallySignedTransaction psbtx;
         std::string error;
         if (!DecodeBase64PSBT(psbtx, txs[i].get_str(), error)) {
             throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                strprintf("TX decode failed %s", error));
         }
         psbtxs.push_back(psbtx);
     }
 
     PartiallySignedTransaction merged_psbt;
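     // CombinePSBTs merges the signatures, keypaths and other metadata from
     // each PSBT; it fails unless every PSBT shares the same unsigned
     // transaction.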
     const TransactionError error = CombinePSBTs(merged_psbt, psbtxs);
     if (error != TransactionError::OK) {
         throw JSONRPCTransactionError(error);
     }
 
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << merged_psbt;
     return EncodeBase64((uint8_t *)ssTx.data(), ssTx.size());
 }
 
 static UniValue finalizepsbt(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "finalizepsbt",
         "Finalize the inputs of a PSBT. If the transaction is fully signed, it "
         "will produce a\n"
         "network serialized transaction which can be broadcast with "
         "sendrawtransaction. Otherwise a PSBT will be\n"
         "created which has the final_scriptSigfields filled for inputs that "
         "are complete.\n"
         "Implements the Finalizer and Extractor roles.\n",
         {
             {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
              "A base64 string of a PSBT"},
             {"extract", RPCArg::Type::BOOL, /* default */ "true",
              "If true and the transaction is complete,\n"
              "                             extract and return the complete "
              "transaction in normal network serialization instead of the "
              "PSBT."},
         },
         RPCResult{
             "{\n"
             "  \"psbt\" : \"value\",          (string) The base64-encoded "
             "partially signed transaction if not extracted\n"
             "  \"hex\" : \"value\",           (string) The hex-encoded network "
             "transaction if extracted\n"
             "  \"complete\" : true|false,   (boolean) If the transaction has a "
             "complete set of signatures\n"
             "  ]\n"
             "}\n"},
         RPCExamples{HelpExampleCli("finalizepsbt", "\"psbt\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VBOOL}, true);
 
     // Unserialize the transaction
     PartiallySignedTransaction psbtx;
     std::string error;
     if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                            strprintf("TX decode failed %s", error));
     }
 
     bool extract = request.params[1].isNull() || request.params[1].get_bool();
 
     CMutableTransaction mtx;
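     // FinalizeAndExtractPSBT finalizes every input it can and reports whether
     // the transaction is complete; when it is, mtx receives the fully signed
     // transaction.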
     bool complete = FinalizeAndExtractPSBT(psbtx, mtx);
 
     UniValue result(UniValue::VOBJ);
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     std::string result_str;
 
     if (complete && extract) {
         ssTx << mtx;
         result_str = HexStr(ssTx.str());
         result.pushKV("hex", result_str);
     } else {
         ssTx << psbtx;
         result_str = EncodeBase64(ssTx.str());
         result.pushKV("psbt", result_str);
     }
     result.pushKV("complete", complete);
 
     return result;
 }
 
 static UniValue createpsbt(const Config &config,
                            const JSONRPCRequest &request) {
     RPCHelpMan{
         "createpsbt",
         "Creates a transaction in the Partially Signed Transaction format.\n"
         "Implements the Creator role.\n",
         {
             {
                 "inputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of json objects",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                             {"sequence", RPCArg::Type::NUM, /* default */
                              "depends on the value of the 'locktime' argument",
                              "The sequence number"},
                         },
                     },
                 },
             },
             {
                 "outputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "a json array with outputs (key-value pairs), where none of "
                 "the keys are duplicated.\n"
                 "That is, each address can only appear once and there can only "
                 "be one 'data' object.\n"
                 "For compatibility reasons, a dictionary, which holds the "
                 "key-value pairs directly, is also\n"
                 "                             accepted as second parameter.",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"address", RPCArg::Type::AMOUNT,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key (string) is the "
                              "bitcoin address, the value (float or string) is "
                              "the amount in " +
                                  CURRENCY_UNIT},
                         },
                     },
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"data", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key must be \"data\", the "
                              "value is hex-encoded data"},
                         },
                     },
                 },
             },
             {"locktime", RPCArg::Type::NUM, /* default */ "0",
              "Raw locktime. Non-0 value also locktime-activates inputs"},
         },
         RPCResult{"  \"psbt\"        (string)  The resulting raw transaction "
                   "(base64-encoded string)\n"},
         RPCExamples{HelpExampleCli(
             "createpsbt", "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                           "\" \"[{\\\"data\\\":\\\"00010203\\\"}]\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params,
                  {
                      UniValue::VARR,
                      UniValueType(), // ARR or OBJ, checked later
                      UniValue::VNUM,
                  },
                  true);
 
     CMutableTransaction rawTx =
         ConstructTransaction(config.GetChainParams(), request.params[0],
                              request.params[1], request.params[2]);
 
     // Make a blank psbt: the Creator role fills in only the unsigned
     // transaction, leaving an empty map for every input and output.
     PartiallySignedTransaction psbtx;
     psbtx.tx = rawTx;
     for (size_t i = 0; i < rawTx.vin.size(); ++i) {
         psbtx.inputs.push_back(PSBTInput());
     }
     for (size_t i = 0; i < rawTx.vout.size(); ++i) {
         psbtx.outputs.push_back(PSBTOutput());
     }
 
     // Serialize the PSBT
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << psbtx;
 
     return EncodeBase64((uint8_t *)ssTx.data(), ssTx.size());
 }
 
 static UniValue converttopsbt(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "converttopsbt",
         "Converts a network serialized transaction to a PSBT. "
         "This should be used only with createrawtransaction and "
         "fundrawtransaction\n"
         "createpsbt and walletcreatefundedpsbt should be used for new "
         "applications.\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The hex string of a raw transaction"},
             {"permitsigdata", RPCArg::Type::BOOL, /* default */ "false",
              "If true, any signatures in the input will be discarded and "
              "conversion.\n"
              "                              will continue. If false, RPC will "
              "fail if any signatures are present."},
         },
         RPCResult{"  \"psbt\"        (string)  The resulting raw "
                   "transaction (base64-encoded string)\n"},
         RPCExamples{
             "\nCreate a transaction\n" +
             HelpExampleCli("createrawtransaction",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]"
                            "\" \"[{\\\"data\\\":\\\"00010203\\\"}]\"") +
             "\nConvert the transaction to a PSBT\n" +
             HelpExampleCli("converttopsbt", "\"rawtransaction\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VBOOL}, true);
 
     // parse hex string from parameter
     CMutableTransaction tx;
     bool permitsigdata =
         request.params[1].isNull() ? false : request.params[1].get_bool();
     if (!DecodeHexTx(tx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     // Remove all scriptSigs from the inputs. When permitsigdata is false,
     // any existing scriptSig makes the call fail instead.
     for (CTxIn &input : tx.vin) {
         if (!input.scriptSig.empty() && !permitsigdata) {
             throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                "Inputs must not have scriptSigs");
         }
         input.scriptSig.clear();
     }
 
     // Make a blank psbt
     PartiallySignedTransaction psbtx;
     psbtx.tx = tx;
     for (size_t i = 0; i < tx.vin.size(); ++i) {
         psbtx.inputs.push_back(PSBTInput());
     }
     for (size_t i = 0; i < tx.vout.size(); ++i) {
         psbtx.outputs.push_back(PSBTOutput());
     }
 
     // Serialize the PSBT
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << psbtx;
 
     return EncodeBase64((uint8_t *)ssTx.data(), ssTx.size());
 }
 
 UniValue utxoupdatepsbt(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "utxoupdatepsbt",
         "Updates all inputs and outputs in a PSBT with data from output "
         "descriptors, the UTXO set or the mempool.\n",
         {
             {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
              "A base64 string of a PSBT"},
             {"descriptors",
              RPCArg::Type::ARR,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "An array of either strings or objects",
              {
                  {"", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                   "An output descriptor"},
                  {"",
                   RPCArg::Type::OBJ,
                   RPCArg::Optional::OMITTED,
                   "An object with an output descriptor and extra information",
                   {
                       {"desc", RPCArg::Type::STR, RPCArg::Optional::NO,
                        "An output descriptor"},
                       {"range", RPCArg::Type::RANGE, "1000",
                        "Up to what index HD chains should be explored (either "
                        "end or [begin,end])"},
                   }},
              }},
         },
         RPCResult{"  \"psbt\"          (string) The base64-encoded "
                   "partially signed transaction with inputs updated\n"},
         RPCExamples{HelpExampleCli("utxoupdatepsbt", "\"psbt\"")}}
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VARR}, true);
 
     // Unserialize the transaction
     PartiallySignedTransaction psbtx;
     std::string error;
     if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                            strprintf("TX decode failed %s", error));
     }
 
     // Parse descriptors, if any.
     FlatSigningProvider provider;
     if (!request.params[1].isNull()) {
         auto descs = request.params[1].get_array();
         for (size_t i = 0; i < descs.size(); ++i) {
             EvalDescriptorStringOrObject(descs[i], provider);
         }
     }
     // We don't actually need private keys further on; hide them as a
     // precaution.
     HidingSigningProvider public_provider(&provider, /* nosign */ true,
                                           /* nobip32derivs */ false);
 
     // Fetch previous transactions (inputs):
     CCoinsView viewDummy;
     CCoinsViewCache view(&viewDummy);
     {
         LOCK2(cs_main, g_mempool.cs);
         CCoinsViewCache &viewChain = *pcoinsTip;
         CCoinsViewMemPool viewMempool(&viewChain, g_mempool);
         // temporarily switch cache backend to db+mempool view
         view.SetBackend(viewMempool);
 
         for (const CTxIn &txin : psbtx.tx->vin) {
             // Load entries from viewChain into view; can fail.
             view.AccessCoin(txin.prevout);
         }
 
         // switch back to avoid locking mempool for too long
         view.SetBackend(viewDummy);
     }
 
     // Fill the inputs
     for (size_t i = 0; i < psbtx.tx->vin.size(); ++i) {
         PSBTInput &input = psbtx.inputs.at(i);
 
         if (!input.utxo.IsNull()) {
             continue;
         }
 
         // Update script/keypath information using descriptor data.
         // Note that SignPSBTInput does a lot more than just constructing ECDSA
         // signatures; we don't actually care about those here, in fact.
         SignPSBTInput(public_provider, psbtx, i,
                       /* sighash_type */ SigHashType().withForkId());
     }
 
     // Update script/keypath information using descriptor data.
     for (unsigned int i = 0; i < psbtx.tx->vout.size(); ++i) {
         UpdatePSBTOutput(public_provider, psbtx, i);
     }
 
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << psbtx;
     return EncodeBase64((uint8_t *)ssTx.data(), ssTx.size());
 }
 
 UniValue joinpsbts(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "joinpsbts",
         "Joins multiple distinct PSBTs with different inputs and outputs "
         "into one PSBT with inputs and outputs from all of the PSBTs\n"
         "No input in any of the PSBTs can be in more than one of the PSBTs.\n",
         {{"txs",
           RPCArg::Type::ARR,
           RPCArg::Optional::NO,
           "A json array of base64 strings of partially signed transactions",
           {{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
             "A base64 string of a PSBT"}}}},
         RPCResult{"  \"psbt\"          (string) The base64-encoded partially "
                   "signed transaction\n"},
         RPCExamples{HelpExampleCli("joinpsbts", "\"psbt\"")}}
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VARR}, true);
 
     // Unserialize the transactions
     std::vector<PartiallySignedTransaction> psbtxs;
     UniValue txs = request.params[0].get_array();
 
     if (txs.size() <= 1) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "At least two PSBTs are required to join PSBTs.");
     }
 
     int32_t best_version = 1;
     uint32_t best_locktime = 0xffffffff;
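     // The joined transaction uses the highest version and the lowest lock
     // time found across the input PSBTs.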
     for (size_t i = 0; i < txs.size(); ++i) {
         PartiallySignedTransaction psbtx;
         std::string error;
         if (!DecodeBase64PSBT(psbtx, txs[i].get_str(), error)) {
             throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                                strprintf("TX decode failed %s", error));
         }
         psbtxs.push_back(psbtx);
         // Choose the highest version number
         if (psbtx.tx->nVersion > best_version) {
             best_version = psbtx.tx->nVersion;
         }
         // Choose the lowest lock time
         if (psbtx.tx->nLockTime < best_locktime) {
             best_locktime = psbtx.tx->nLockTime;
         }
     }
 
     // Create a blank psbt where everything will be added
     PartiallySignedTransaction merged_psbt;
     merged_psbt.tx = CMutableTransaction();
     merged_psbt.tx->nVersion = best_version;
     merged_psbt.tx->nLockTime = best_locktime;
 
     // Merge
     for (auto &psbt : psbtxs) {
         for (size_t i = 0; i < psbt.tx->vin.size(); ++i) {
             if (!merged_psbt.AddInput(psbt.tx->vin[i], psbt.inputs[i])) {
                 throw JSONRPCError(
                     RPC_INVALID_PARAMETER,
                     strprintf(
                         "Input %s:%d exists in multiple PSBTs",
                         psbt.tx->vin[i].prevout.GetTxId().ToString().c_str(),
                         psbt.tx->vin[i].prevout.GetN()));
             }
         }
         for (size_t i = 0; i < psbt.tx->vout.size(); ++i) {
             merged_psbt.AddOutput(psbt.tx->vout[i], psbt.outputs[i]);
         }
         merged_psbt.unknown.insert(psbt.unknown.begin(), psbt.unknown.end());
     }
 
     // Generate index lists for shuffling the inputs and outputs of the
     // merged PSBT
     std::vector<int> input_indices(merged_psbt.inputs.size());
     std::iota(input_indices.begin(), input_indices.end(), 0);
     std::vector<int> output_indices(merged_psbt.outputs.size());
     std::iota(output_indices.begin(), output_indices.end(), 0);
 
     // Shuffle the input and output index lists
     Shuffle(input_indices.begin(), input_indices.end(), FastRandomContext());
     Shuffle(output_indices.begin(), output_indices.end(), FastRandomContext());
 
     PartiallySignedTransaction shuffled_psbt;
     shuffled_psbt.tx = CMutableTransaction();
     shuffled_psbt.tx->nVersion = merged_psbt.tx->nVersion;
     shuffled_psbt.tx->nLockTime = merged_psbt.tx->nLockTime;
     for (int i : input_indices) {
         shuffled_psbt.AddInput(merged_psbt.tx->vin[i], merged_psbt.inputs[i]);
     }
     for (int i : output_indices) {
         shuffled_psbt.AddOutput(merged_psbt.tx->vout[i],
                                 merged_psbt.outputs[i]);
     }
     shuffled_psbt.unknown.insert(merged_psbt.unknown.begin(),
                                  merged_psbt.unknown.end());
 
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << shuffled_psbt;
     return EncodeBase64((uint8_t *)ssTx.data(), ssTx.size());
 }
 
 UniValue analyzepsbt(const Config &config, const JSONRPCRequest &request) {
     RPCHelpMan{
         "analyzepsbt",
         "Analyzes and provides information about the current status of a "
         "PSBT and its inputs\n",
         {{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
           "A base64 string of a PSBT"}},
         RPCResult{
             "{\n"
             "  \"inputs\" : [                      (array of json objects)\n"
             "    {\n"
             "      \"has_utxo\" : true|false     (boolean) Whether a UTXO is "
             "provided\n"
             "      \"is_final\" : true|false     (boolean) Whether the input "
             "is finalized\n"
             "      \"missing\" : {               (json object, optional) "
             "Things that are missing that are required to complete this input\n"
             "        \"pubkeys\" : [             (array), optional\n"
             "          \"keyid\"                 (string) Public key ID, "
             "hash160 of the public key, of a public key whose BIP 32 "
             "derivation path is missing\n"
             "        ]\n"
             "        \"signatures\" : [          (array), optional\n"
             "          \"keyid\"                 (string) Public key ID, "
             "hash160 of the public key, of a public key whose signature is "
             "missing\n"
             "        ]\n"
             "        \"redeemscript\" : \"hash\"   (string, optional) Hash160 "
             "of the redeemScript that is missing\n"
             "      }\n"
             "      \"next\" : \"role\"           (string, optional) Role of "
             "the next person that this input needs to go to\n"
             "    }\n"
             "    ,...\n"
             "  ]\n"
             "  \"estimated_vsize\" : vsize       (numeric, optional) Estimated "
             "vsize of the final signed transaction\n"
             "  \"estimated_feerate\" : feerate   (numeric, optional) Estimated "
             "feerate of the final signed transaction in " +
             CURRENCY_UNIT +
             "/kB. Shown only if all UTXO slots in the PSBT have been filled.\n"
             "  \"fee\" : fee                     (numeric, optional) The "
             "transaction fee paid. Shown only if all UTXO slots in the PSBT "
             "have been filled.\n"
             "  \"next\" : \"role\"                 (string) Role of the next "
             "person that this psbt needs to go to\n"
             "}\n"},
         RPCExamples{HelpExampleCli("analyzepsbt", "\"psbt\"")}}
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR});
 
     // Unserialize the transaction
     PartiallySignedTransaction psbtx;
     std::string error;
     if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                            strprintf("TX decode failed %s", error));
     }
 
     PSBTAnalysis psbta = AnalyzePSBT(psbtx);
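     // psbta now records, for each input, whether a UTXO is present, whether
     // the input is final, and which pubkeys, signatures or scripts are still
     // missing, plus which PSBT role should act next.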
 
     UniValue result(UniValue::VOBJ);
     UniValue inputs_result(UniValue::VARR);
     for (const auto &input : psbta.inputs) {
         UniValue input_univ(UniValue::VOBJ);
         UniValue missing(UniValue::VOBJ);
 
         input_univ.pushKV("has_utxo", input.has_utxo);
         input_univ.pushKV("is_final", input.is_final);
         input_univ.pushKV("next", PSBTRoleName(input.next));
 
         if (!input.missing_pubkeys.empty()) {
             UniValue missing_pubkeys_univ(UniValue::VARR);
             for (const CKeyID &pubkey : input.missing_pubkeys) {
                 missing_pubkeys_univ.push_back(HexStr(pubkey));
             }
             missing.pushKV("pubkeys", missing_pubkeys_univ);
         }
         if (!input.missing_redeem_script.IsNull()) {
             missing.pushKV("redeemscript", HexStr(input.missing_redeem_script));
         }
         if (!input.missing_sigs.empty()) {
             UniValue missing_sigs_univ(UniValue::VARR);
             for (const CKeyID &pubkey : input.missing_sigs) {
                 missing_sigs_univ.push_back(HexStr(pubkey));
             }
             missing.pushKV("signatures", missing_sigs_univ);
         }
         if (!missing.getKeys().empty()) {
             input_univ.pushKV("missing", missing);
         }
         inputs_result.push_back(input_univ);
     }
     result.pushKV("inputs", inputs_result);
 
     if (psbta.estimated_vsize != nullopt) {
         result.pushKV("estimated_vsize", (int)*psbta.estimated_vsize);
     }
     if (psbta.estimated_feerate != nullopt) {
         result.pushKV("estimated_feerate",
                       ValueFromAmount(psbta.estimated_feerate->GetFeePerK()));
     }
     if (psbta.fee != nullopt) {
         result.pushKV("fee", ValueFromAmount(*psbta.fee));
     }
     result.pushKV("next", PSBTRoleName(psbta.next));
 
     return result;
 }
 
 // clang-format off
 static const CRPCCommand commands[] = {
     //  category            name                         actor (function)           argNames
     //  ------------------- ------------------------     ----------------------     ----------
     { "rawtransactions",    "getrawtransaction",         getrawtransaction,         {"txid","verbose","blockhash"} },
     { "rawtransactions",    "createrawtransaction",      createrawtransaction,      {"inputs","outputs","locktime"} },
     { "rawtransactions",    "decoderawtransaction",      decoderawtransaction,      {"hexstring"} },
     { "rawtransactions",    "decodescript",              decodescript,              {"hexstring"} },
     { "rawtransactions",    "sendrawtransaction",        sendrawtransaction,        {"hexstring","allowhighfees|maxfeerate"} },
     { "rawtransactions",    "combinerawtransaction",     combinerawtransaction,     {"txs"} },
     { "rawtransactions",    "signrawtransactionwithkey", signrawtransactionwithkey, {"hexstring","privkeys","prevtxs","sighashtype"} },
     { "rawtransactions",    "testmempoolaccept",         testmempoolaccept,         {"rawtxs","allowhighfees|maxfeerate"} },
     { "rawtransactions",    "decodepsbt",                decodepsbt,                {"psbt"} },
     { "rawtransactions",    "combinepsbt",               combinepsbt,               {"txs"} },
     { "rawtransactions",    "finalizepsbt",              finalizepsbt,              {"psbt", "extract"} },
     { "rawtransactions",    "createpsbt",                createpsbt,                {"inputs","outputs","locktime"} },
     { "rawtransactions",    "converttopsbt",             converttopsbt,             {"hexstring","permitsigdata"} },
     { "rawtransactions",    "utxoupdatepsbt",            utxoupdatepsbt,            {"psbt", "descriptors"} },
     { "rawtransactions",    "joinpsbts",                 joinpsbts,                 {"txs"} },
     { "rawtransactions",    "analyzepsbt",               analyzepsbt,               {"psbt"} },
     { "blockchain",         "gettxoutproof",             gettxoutproof,             {"txids", "blockhash"} },
     { "blockchain",         "verifytxoutproof",          verifytxoutproof,          {"proof"} },
 };
 // clang-format on
 
 void RegisterRawTransactionRPCCommands(CRPCTable &t) {
     for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) {
         t.appendCommand(commands[vcidx].name, &commands[vcidx]);
     }
 }
diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp
index 5a652c8f5..720988b6c 100644
--- a/src/test/util/blockfilter.cpp
+++ b/src/test/util/blockfilter.cpp
@@ -1,26 +1,27 @@
 // Copyright (c) 2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <test/util/blockfilter.h>
 
+#include <blockdb.h>
 #include <chainparams.h>
 #include <validation.h>
 
 bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex *block_index,
                    BlockFilter &filter) {
     CBlock block;
     if (!ReadBlockFromDisk(block, block_index->GetBlockPos(),
                            Params().GetConsensus())) {
         return false;
     }
 
     CBlockUndo block_undo;
     if (block_index->nHeight > 0 &&
         !UndoReadFromDisk(block_undo, block_index)) {
         return false;
     }
 
     filter = BlockFilter(filter_type, block, block_undo);
     return true;
 }
diff --git a/src/validation.cpp b/src/validation.cpp
index bbaab9875..324aacea3 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,5806 +1,5734 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2018 The Bitcoin Core developers
 // Copyright (c) 2017-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <validation.h>
 
 #include <arith_uint256.h>
 #include <avalanche/processor.h>
+#include <blockdb.h>
 #include <blockvalidity.h>
 #include <chainparams.h>
 #include <checkpoints.h>
 #include <checkqueue.h>
 #include <config.h>
 #include <consensus/activation.h>
 #include <consensus/merkle.h>
 #include <consensus/tx_check.h>
 #include <consensus/tx_verify.h>
 #include <consensus/validation.h>
 #include <hash.h>
 #include <index/txindex.h>
 #include <minerfund.h>
 #include <policy/fees.h>
 #include <policy/mempool.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <pow/aserti32d.h> // For ResetASERTAnchorBlockCache
 #include <pow/pow.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <random.h>
 #include <reverse_iterator.h>
 #include <script/script.h>
 #include <script/scriptcache.h>
 #include <script/sigcache.h>
 #include <shutdown.h>
 #include <timedata.h>
 #include <tinyformat.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <ui_interface.h>
 #include <undo.h>
 #include <util/moneystr.h>
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/translation.h>
 #include <util/validation.h>
 #include <validationinterface.h>
 #include <warnings.h>
 
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/thread.hpp> // boost::this_thread::interruption_point() (mingw)
 
 #include <string>
 #include <thread>
 
 #define MICRO 0.000001
 #define MILLI 0.001
 
 namespace {
 BlockManager g_blockman;
 } // namespace
 
 static CChainState g_chainstate(g_blockman);
 
 CChainState &ChainstateActive() {
     return g_chainstate;
 }
 
 CChain &ChainActive() {
     return g_chainstate.m_chain;
 }
 
 /**
  * Global state
  *
  * Mutex to guard access to validation specific variables, such as reading
  * or changing the chainstate.
  *
  * This may also need to be locked when updating the transaction pool, e.g. on
  * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
  *
  * The transaction pool has a separate lock to allow reading from it and the
  * chainstate at the same time.
  */
 RecursiveMutex cs_main;
 
 CBlockIndex *pindexBestHeader = nullptr;
 Mutex g_best_block_mutex;
 std::condition_variable g_best_block_cv;
 uint256 g_best_block;
 std::atomic_bool fImporting(false);
 std::atomic_bool fReindex(false);
 bool fHavePruned = false;
 bool fPruneMode = false;
 bool fRequireStandard = true;
 bool fCheckBlockIndex = false;
 bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
 size_t nCoinCacheUsage = 5000 * 300;
 uint64_t nPruneTarget = 0;
 int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
 
 BlockHash hashAssumeValid;
 arith_uint256 nMinimumChainWork;
 
 CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB);
 
 CTxMemPool g_mempool;
 
 /** Constant stuff for coinbase transactions we create: */
 CScript COINBASE_FLAGS;
 
 // Internal stuff
 namespace {
 CBlockIndex *pindexBestInvalid = nullptr;
 CBlockIndex *pindexBestParked = nullptr;
 
 RecursiveMutex cs_LastBlockFile;
 std::vector<CBlockFileInfo> vinfoBlockFile;
 int nLastBlockFile = 0;
 /**
  * Global flag to indicate we should check to see if there are block/undo files
  * that should be deleted. Set on startup or if we allocate more file space when
  * we're in prune mode.
  */
 bool fCheckForPruning = false;
 
 /** Dirty block index entries. */
 std::set<const CBlockIndex *> setDirtyBlockIndex;
 
 /** Dirty block file entries. */
 std::set<int> setDirtyFileInfo;
 } // namespace
 
 BlockValidationOptions::BlockValidationOptions(const Config &config)
     : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
       checkMerkleRoot(true) {}
 
 CBlockIndex *LookupBlockIndex(const BlockHash &hash) {
     AssertLockHeld(cs_main);
     BlockMap::const_iterator it = g_blockman.m_block_index.find(hash);
     return it == g_blockman.m_block_index.end() ? nullptr : it->second;
 }
 
 CBlockIndex *FindForkInGlobalIndex(const CChain &chain,
                                    const CBlockLocator &locator) {
     AssertLockHeld(cs_main);
 
     // Find the latest block common to locator and chain - we expect that
     // locator.vHave is sorted descending by height.
     for (const BlockHash &hash : locator.vHave) {
         CBlockIndex *pindex = LookupBlockIndex(hash);
         if (pindex) {
             if (chain.Contains(pindex)) {
                 return pindex;
             }
             if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                 return chain.Tip();
             }
         }
     }
     return chain.Genesis();
 }
 
 std::unique_ptr<CCoinsViewDB> pcoinsdbview;
 std::unique_ptr<CCoinsViewCache> pcoinsTip;
 std::unique_ptr<CBlockTreeDB> pblocktree;
 
 // See definition for documentation
 static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                    int nManualPruneHeight);
 static void FindFilesToPrune(std::set<int> &setFilesToPrune,
                              uint64_t nPruneAfterHeight);
-static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
-static FlatFileSeq BlockFileSeq();
-static FlatFileSeq UndoFileSeq();
 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
                                         const CBlockIndex *pindex);
 
 bool TestLockPointValidity(const LockPoints *lp) {
     AssertLockHeld(cs_main);
     assert(lp);
     // If there are relative lock times then maxInputBlock will be set.
     // If there are no relative lock times, the LockPoints don't depend on
     // the chain.
     if (lp->maxInputBlock) {
         // Check whether ::ChainActive() is an extension of the block at
         // which the LockPoints calculation was valid. If not, the LockPoints
         // are no longer valid.
         if (!::ChainActive().Contains(lp->maxInputBlock)) {
             return false;
         }
     }
 
     // LockPoints still valid
     return true;
 }
 
 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
                         int flags, LockPoints *lp, bool useExistingLockPoints) {
     AssertLockHeld(cs_main);
     AssertLockHeld(pool.cs);
 
     CBlockIndex *tip = ::ChainActive().Tip();
     assert(tip != nullptr);
 
     CBlockIndex index;
     index.pprev = tip;
     // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
     // height-based locks because when SequenceLocks() is called within
     // ConnectBlock(), the height of the block *being* evaluated is what is
     // used. Thus, if we want to know if a transaction can be part of the
     // *next* block, we need to use one more than ::ChainActive().Height().
     index.nHeight = tip->nHeight + 1;
 
     std::pair<int, int64_t> lockPair;
     if (useExistingLockPoints) {
         assert(lp);
         lockPair.first = lp->height;
         lockPair.second = lp->time;
     } else {
         // pcoinsTip contains the UTXO set for ::ChainActive().Tip()
         CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
         std::vector<int> prevheights;
         prevheights.resize(tx.vin.size());
         for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
             const CTxIn &txin = tx.vin[txinIndex];
             Coin coin;
             if (!viewMemPool.GetCoin(txin.prevout, coin)) {
                 return error("%s: Missing input", __func__);
             }
             if (coin.GetHeight() == MEMPOOL_HEIGHT) {
                 // Assume all mempool transactions confirm in the next block
                 prevheights[txinIndex] = tip->nHeight + 1;
             } else {
                 prevheights[txinIndex] = coin.GetHeight();
             }
         }
         lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
         if (lp) {
             lp->height = lockPair.first;
             lp->time = lockPair.second;
             // Also store the hash of the block with the highest height of all
             // the blocks which have sequence locked prevouts. This hash needs
             // to still be on the chain for these LockPoint calculations to be
             // valid.
             // Note: It is impossible to correctly calculate a maxInputBlock if
             // any of the sequence locked inputs depend on unconfirmed txs,
             // except in the special case where the relative lock time/height is
             // 0, which is equivalent to no sequence lock. Since we assume
             // input height of tip+1 for mempool txs and test the resulting
             // lockPair from CalculateSequenceLocks against tip+1, we know
             // EvaluateSequenceLocks will fail if there was a non-zero
             // sequence lock on a mempool input, so we can use the return
             // value of CheckSequenceLocks to indicate the LockPoints
             // validity.
             int maxInputHeight = 0;
             for (const int height : prevheights) {
                 // Can ignore mempool inputs since we'll fail if they had
                 // non-zero locks.
                 if (height != tip->nHeight + 1) {
                     maxInputHeight = std::max(maxInputHeight, height);
                 }
             }
             lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
         }
     }
     return EvaluateSequenceLocks(index, lockPair);
 }
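
 // Illustrative sketch only (not part of this change): how a hypothetical
 // caller could use CheckSequenceLocks() to ask whether a transaction would
 // be BIP68-final in the *next* block. The real call site is in
 // AcceptToMemoryPoolWorker() below; `ptx` here is a hypothetical
 // CTransactionRef and the locking mirrors that call site.
 //
 //     LockPoints lp;
 //     {
 //         LOCK2(cs_main, g_mempool.cs);
 //         if (!CheckSequenceLocks(g_mempool, *ptx,
 //                                 STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
 //             // Not mineable in the next block; the mempool rejects such a
 //             // transaction as "non-BIP68-final".
 //         }
 //     }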
 
 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
 // cause the node to switch to the replay-protected SigHash ForkID value when
 // the median timestamp of the previous 11 blocks is greater than or equal to
 // <timestamp>. Defaults to the pre-defined timestamp when not set.
 static bool IsReplayProtectionEnabled(const Consensus::Params &params,
                                       int64_t nMedianTimePast) {
     return nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
                                            params.tachyonActivationTime);
 }
 
 static bool IsReplayProtectionEnabled(const Consensus::Params &params,
                                       const CBlockIndex *pindexPrev) {
     if (pindexPrev == nullptr) {
         return false;
     }
 
     return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
 }
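
 // Illustrative example: activation is driven purely by median-time-past. If
 // the node were started with a hypothetical
 // -replayprotectionactivationtime=1589544000, then for any block whose
 // parent has GetMedianTimePast() >= 1589544000, GetNextBlockScriptFlags()
 // below adds SCRIPT_ENABLE_REPLAY_PROTECTION to the script verification
 // flags for that block.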
 
 // Used to avoid the mempool polluting consensus-critical paths if
 // CCoinsViewMemPool were somehow broken and returning the wrong
 // scriptPubKeys.
 static bool CheckInputsFromMempoolAndCache(
     const CTransaction &tx, TxValidationState &state,
     const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
     bool cacheSigStore, PrecomputedTransactionData &txdata, int &nSigChecksOut)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
 
     // pool.cs should be locked already, but go ahead and re-take the lock
     // here to enforce that the mempool doesn't change between when we check
     // the view and when we actually call through to CheckInputs.
     LOCK(pool.cs);
 
     assert(!tx.IsCoinBase());
     for (const CTxIn &txin : tx.vin) {
         const Coin &coin = view.AccessCoin(txin.prevout);
 
         // At this point we haven't actually checked if the coins are all
         // available (or shouldn't assume we have, since CheckInputs does). So
         // we just return failure if the inputs are not available here, and then
         // only have to check equivalence for available inputs.
         if (coin.IsSpent()) {
             return false;
         }
 
         const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
         if (txFrom) {
             assert(txFrom->GetId() == txin.prevout.GetTxId());
             assert(txFrom->vout.size() > txin.prevout.GetN());
             assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
         } else {
             const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
             assert(!coinFromDisk.IsSpent());
             assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
         }
     }
 
     return CheckInputs(tx, state, view, flags, cacheSigStore, true, txdata,
                        nSigChecksOut);
 }
 
 /**
  * @param[out] coins_to_uncache   Return any outpoints which were not
  *                                previously present in the coins cache, but
  *                                were added as a result of validating the tx
  *                                for mempool acceptance. This allows the
  *                                caller to optionally remove the cache
  *                                additions if the associated transaction
  *                                ends up being rejected by the mempool.
  */
 static bool AcceptToMemoryPoolWorker(
     const Config &config, CTxMemPool &pool, TxValidationState &state,
     const CTransactionRef &ptx, int64_t nAcceptTime, bool bypass_limits,
     const Amount nAbsurdFee, std::vector<COutPoint> &coins_to_uncache,
     bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
 
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     const CTransaction &tx = *ptx;
     const TxId txid = tx.GetId();
 
     // mempool "read lock" (held through
     // GetMainSignals().TransactionAddedToMempool())
     LOCK(pool.cs);
 
     // Coinbase is only valid in a block, not as a loose transaction.
     if (!CheckRegularTransaction(tx, state)) {
         // state filled in by CheckRegularTransaction.
         return false;
     }
 
     // Rather not work on nonstandard transactions (unless -testnet)
     std::string reason;
     if (fRequireStandard && !IsStandardTx(tx, reason)) {
         return state.Invalid(TxValidationResult::TX_NOT_STANDARD,
                              REJECT_NONSTANDARD, reason);
     }
 
     // Only accept nLockTime-using transactions that can be mined in the next
     // block; we don't want our mempool filled up with transactions that can't
     // be mined yet.
     TxValidationState ctxState;
     if (!ContextualCheckTransactionForCurrentBlock(
             consensusParams, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) {
         // We copy the state from a dummy to ensure we don't increase the ban
         // score of a peer for a transaction that could be valid in the
         // future.
         return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
                              REJECT_NONSTANDARD, ctxState.GetRejectReason(),
                              ctxState.GetDebugMessage());
     }
 
     // Is it already in the memory pool?
     if (pool.exists(txid)) {
         return state.Invalid(TxValidationResult::TX_CONFLICT, REJECT_DUPLICATE,
                              "txn-already-in-mempool");
     }
 
     // Check for conflicts with in-memory transactions
     for (const CTxIn &txin : tx.vin) {
         auto itConflicting = pool.mapNextTx.find(txin.prevout);
         if (itConflicting != pool.mapNextTx.end()) {
             // Disable replacement feature for good
             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                  REJECT_DUPLICATE, "txn-mempool-conflict");
         }
     }
 
     {
         CCoinsView dummy;
         CCoinsViewCache view(&dummy);
 
         LockPoints lp;
         CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
         view.SetBackend(viewMemPool);
 
         // Do all inputs exist?
         for (const CTxIn &txin : tx.vin) {
             if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
                 coins_to_uncache.push_back(txin.prevout);
             }
 
             // Note: this call may add txin.prevout to the coins cache
             // (pcoinsTip.cacheCoins) by way of FetchCoin(). It should be
             // removed later (via coins_to_uncache) if this tx turns out to be
             // invalid.
             if (!view.HaveCoin(txin.prevout)) {
                 // Are inputs missing because we already have the tx?
                 for (size_t out = 0; out < tx.vout.size(); out++) {
                     // Optimistically just do efficient check of cache for
                     // outputs.
                     if (pcoinsTip->HaveCoinInCache(COutPoint(txid, out))) {
                         return state.Invalid(TxValidationResult::TX_CONFLICT,
                                              REJECT_DUPLICATE,
                                              "txn-already-known");
                     }
                 }
 
                 // Otherwise assume this might be an orphan tx for which we just
                 // haven't seen parents yet.
                 return state.Invalid(TxValidationResult::TX_MISSING_INPUTS,
                                      REJECT_INVALID,
                                      "bad-txns-inputs-missingorspent");
             }
         }
 
         // Are the actual inputs available?
         if (!view.HaveInputs(tx)) {
             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                  REJECT_DUPLICATE, "bad-txns-inputs-spent");
         }
 
         // Bring the best block into scope.
         view.GetBestBlock();
 
         // We have all inputs cached now, so switch back to dummy so that we
         // don't need to keep the lock on the mempool.
         view.SetBackend(dummy);
 
         // Only accept BIP68 sequence locked transactions that can be mined in
         // the next block; we don't want our mempool filled up with transactions
         // that can't be mined yet. Must keep pool.cs for this unless we change
         // CheckSequenceLocks to take a CoinsViewCache instead of create its
         // own.
         if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS,
                                 &lp)) {
             return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND,
                                  REJECT_NONSTANDARD, "non-BIP68-final");
         }
 
         Amount nFees = Amount::zero();
         if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view),
                                       nFees)) {
             return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
                          tx.GetId().ToString(), FormatStateMessage(state));
         }
 
         const uint32_t nextBlockScriptVerifyFlags =
             GetNextBlockScriptFlags(consensusParams, ::ChainActive().Tip());
 
         // Check for non-standard pay-to-script-hash in inputs
         if (fRequireStandard &&
             !AreInputsStandard(tx, view, nextBlockScriptVerifyFlags)) {
             return state.Invalid(TxValidationResult::TX_NOT_STANDARD,
                                  REJECT_NONSTANDARD,
                                  "bad-txns-nonstandard-inputs");
         }
 
         // nModifiedFees includes any fee deltas from PrioritiseTransaction
         Amount nModifiedFees = nFees;
         pool.ApplyDelta(txid, nModifiedFees);
 
         // Keep track of transactions that spend a coinbase, which we re-scan
         // during reorgs to ensure COINBASE_MATURITY is still met.
         bool fSpendsCoinbase = false;
         for (const CTxIn &txin : tx.vin) {
             const Coin &coin = view.AccessCoin(txin.prevout);
             if (coin.IsCoinBase()) {
                 fSpendsCoinbase = true;
                 break;
             }
         }
 
         unsigned int nSize = tx.GetTotalSize();
 
         // No transactions are allowed below minRelayTxFee except from
         // disconnected blocks.
         // Do not change this to use virtualsize without coordinating a network
         // policy upgrade.
         if (!bypass_limits && nModifiedFees < minRelayTxFee.GetFee(nSize)) {
             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                  REJECT_INSUFFICIENTFEE,
                                  "min relay fee not met");
         }
 
         if (nAbsurdFee != Amount::zero() && nFees > nAbsurdFee) {
             return state.Invalid(TxValidationResult::TX_NOT_STANDARD,
                                  REJECT_HIGHFEE, "absurdly-high-fee",
                                  strprintf("%d > %d", nFees, nAbsurdFee));
         }
 
         // Validate input scripts against standard script flags.
         const uint32_t scriptVerifyFlags =
             nextBlockScriptVerifyFlags | STANDARD_SCRIPT_VERIFY_FLAGS;
         PrecomputedTransactionData txdata(tx);
         int nSigChecksStandard;
         if (!CheckInputs(tx, state, view, scriptVerifyFlags, true, false,
                          txdata, nSigChecksStandard)) {
             // State filled in by CheckInputs.
             return false;
         }
 
         CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
                               fSpendsCoinbase, nSigChecksStandard, lp);
 
         unsigned int nVirtualSize = entry.GetTxVirtualSize();
 
         Amount mempoolRejectFee =
             pool.GetMinFee(
                     gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                     1000000)
                 .GetFee(nVirtualSize);
         if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
             nModifiedFees < mempoolRejectFee) {
             return state.Invalid(
                 TxValidationResult::TX_MEMPOOL_POLICY, REJECT_INSUFFICIENTFEE,
                 "mempool min fee not met",
                 strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
         }
 
         // Calculate in-mempool ancestors, up to a limit.
         CTxMemPool::setEntries setAncestors;
         size_t nLimitAncestors =
             gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
         size_t nLimitAncestorSize =
             gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
             1000;
         size_t nLimitDescendants =
             gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
         size_t nLimitDescendantSize =
             gArgs.GetArg("-limitdescendantsize",
                          DEFAULT_DESCENDANT_SIZE_LIMIT) *
             1000;
         std::string errString;
         if (!pool.CalculateMemPoolAncestors(
                 entry, setAncestors, nLimitAncestors, nLimitAncestorSize,
                 nLimitDescendants, nLimitDescendantSize, errString)) {
             return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                  REJECT_NONSTANDARD, "too-long-mempool-chain",
                                  errString);
         }
 
         // Check again against the next block's script verification flags
         // to cache our script execution flags.
         //
         // This is also useful in case of bugs in the standard flags that cause
         // transactions to pass as valid when they're actually invalid. For
         // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
         // NOT scripts to pass, even though they were invalid.
         //
         // There is a similar check in CreateNewBlock() to prevent creating
         // invalid blocks (using TestBlockValidity), however allowing such
         // transactions into the mempool can be exploited as a DoS attack.
         int nSigChecksConsensus;
         if (!CheckInputsFromMempoolAndCache(tx, state, view, pool,
                                             nextBlockScriptVerifyFlags, true,
                                             txdata, nSigChecksConsensus)) {
             // This can occur under some circumstances, if the node receives an
             // unrequested tx which is invalid due to new consensus rules not
             // being activated yet (during IBD).
             return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed "
                          "against next-block but not STANDARD flags %s, %s",
                          __func__, txid.ToString(), FormatStateMessage(state));
         }
 
         if (nSigChecksStandard != nSigChecksConsensus) {
             // We can't accept this transaction as we've used the standard count
             // for the mempool/mining, but the consensus count will be enforced
             // in validation (we don't want to produce bad block templates).
             return error(
                 "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
                 "standard and consensus flags in %s",
                 __func__, txid.ToString());
         }
 
         if (test_accept) {
             // Tx was accepted, but not added
             return true;
         }
 
         // Store transaction in memory.
         pool.addUnchecked(entry, setAncestors);
 
         // Trim mempool and check if tx was trimmed.
         if (!bypass_limits) {
             pool.LimitSize(
                 gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
                 std::chrono::hours{
                     gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
             if (!pool.exists(txid)) {
                 return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
                                      REJECT_INSUFFICIENTFEE, "mempool full");
             }
         }
     }
 
     GetMainSignals().TransactionAddedToMempool(ptx);
     return true;
 }
 
 /**
  * (try to) add transaction to memory pool with a specified acceptance time.
  */
 static bool
 AcceptToMemoryPoolWithTime(const Config &config, CTxMemPool &pool,
                            TxValidationState &state, const CTransactionRef &tx,
                            int64_t nAcceptTime, bool bypass_limits,
                            const Amount nAbsurdFee, bool test_accept)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     std::vector<COutPoint> coins_to_uncache;
     bool res = AcceptToMemoryPoolWorker(config, pool, state, tx, nAcceptTime,
                                         bypass_limits, nAbsurdFee,
                                         coins_to_uncache, test_accept);
     if (!res) {
         // Remove coins that were not present in the coins cache before calling
         // ATMPW; this is to prevent memory DoS in case we receive a large
         // number of invalid transactions that attempt to overrun the in-memory
         // coins cache
         // (`CCoinsViewCache::cacheCoins`).
 
         for (const COutPoint &outpoint : coins_to_uncache) {
             pcoinsTip->Uncache(outpoint);
         }
     }
 
     // After we've (potentially) uncached entries, ensure our coins cache is
     // still within its size limits
     BlockValidationState stateDummy;
     ::ChainstateActive().FlushStateToDisk(config.GetChainParams(), stateDummy,
                                           FlushStateMode::PERIODIC);
     return res;
 }
 
 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
                         TxValidationState &state, const CTransactionRef &tx,
                         bool bypass_limits, const Amount nAbsurdFee,
                         bool test_accept) {
     return AcceptToMemoryPoolWithTime(config, pool, state, tx, GetTime(),
                                       bypass_limits, nAbsurdFee, test_accept);
 }
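
 // Illustrative sketch (assumption, not an existing call site): submitting a
 // transaction in "dry run" mode via test_accept, the way an RPC such as
 // testmempoolaccept might drive it. `config` and `ptx` are hypothetical.
 //
 //     TxValidationState state;
 //     bool ok;
 //     {
 //         LOCK(cs_main);
 //         ok = AcceptToMemoryPool(config, g_mempool, state, ptx,
 //                                 false /* bypass_limits */,
 //                                 Amount::zero() /* nAbsurdFee */,
 //                                 true /* test_accept */);
 //     }
 //     if (!ok) {
 //         LogPrintf("rejected: %s\n", FormatStateMessage(state));
 //     }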
 
 /**
  * Return the transaction in txOut, and if it was found inside a block, its
  * hash is placed in hashBlock. If block_index is provided, the transaction
  * is fetched from the corresponding block.
  */
 bool GetTransaction(const TxId &txid, CTransactionRef &txOut,
                     const Consensus::Params &params, BlockHash &hashBlock,
                     const CBlockIndex *const block_index) {
     LOCK(cs_main);
 
     if (block_index == nullptr) {
         CTransactionRef ptx = g_mempool.get(txid);
         if (ptx) {
             txOut = ptx;
             return true;
         }
 
         if (g_txindex) {
             return g_txindex->FindTx(txid, hashBlock, txOut);
         }
     } else {
         CBlock block;
         if (ReadBlockFromDisk(block, block_index, params)) {
             for (const auto &tx : block.vtx) {
                 if (tx->GetId() == txid) {
                     txOut = tx;
                     hashBlock = block_index->GetBlockHash();
                     return true;
                 }
             }
         }
     }
 
     return false;
 }
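
 // Illustrative usage sketch: looking a transaction up either via the mempool
 // and txindex (block_index == nullptr) or inside a specific block. `txid`,
 // `chainparams` and `pindex` are hypothetical.
 //
 //     CTransactionRef tx;
 //     BlockHash hashBlock;
 //     if (GetTransaction(txid, tx, chainparams.GetConsensus(), hashBlock,
 //                        pindex)) {
 //         // If the transaction was found in a block, hashBlock is set to
 //         // that block's hash; for a mempool hit it is left untouched.
 //     }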
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // CBlock and CBlockIndex
 //
 
 static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos,
                              const CMessageHeader::MessageMagic &messageStart) {
     // Open history file to append
     CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
     if (fileout.IsNull()) {
         return error("WriteBlockToDisk: OpenBlockFile failed");
     }
 
     // Write index header
     unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
     fileout << messageStart << nSize;
 
     // Write block
     long fileOutPos = ftell(fileout.Get());
     if (fileOutPos < 0) {
         return error("WriteBlockToDisk: ftell failed");
     }
 
     pos.nPos = (unsigned int)fileOutPos;
     fileout << block;
 
     return true;
 }
 
-bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
-                       const Consensus::Params &params) {
-    block.SetNull();
-
-    // Open history file to read
-    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
-    if (filein.IsNull()) {
-        return error("ReadBlockFromDisk: OpenBlockFile failed for %s",
-                     pos.ToString());
-    }
-
-    // Read block
-    try {
-        filein >> block;
-    } catch (const std::exception &e) {
-        return error("%s: Deserialize or I/O error - %s at %s", __func__,
-                     e.what(), pos.ToString());
-    }
-
-    // Check the header
-    if (!CheckProofOfWork(block.GetHash(), block.nBits, params)) {
-        return error("ReadBlockFromDisk: Errors in block header at %s",
-                     pos.ToString());
-    }
-
-    return true;
-}
-
-bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
-                       const Consensus::Params &params) {
-    FlatFilePos blockPos;
-    {
-        LOCK(cs_main);
-        blockPos = pindex->GetBlockPos();
-    }
-
-    if (!ReadBlockFromDisk(block, blockPos, params)) {
-        return false;
-    }
-
-    if (block.GetHash() != pindex->GetBlockHash()) {
-        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() "
-                     "doesn't match index for %s at %s",
-                     pindex->ToString(), pindex->GetBlockPos().ToString());
-    }
-
-    return true;
-}
-
 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
     int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
     // Force block reward to zero when right shift is undefined.
     if (halvings >= 64) {
         return Amount::zero();
     }
 
     Amount nSubsidy = 50 * COIN;
     // Subsidy is cut in half every 210,000 blocks which will occur
     // approximately every 4 years.
     return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
 }
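
 // Worked example (illustrative only): with the mainnet halving interval of
 // 210,000 blocks, GetBlockSubsidy() yields the familiar schedule:
 //
 //     height            halvings   subsidy
 //     0      .. 209999  0          50    * COIN
 //     210000 .. 419999  1          25    * COIN
 //     420000 .. 629999  2          12.5  * COIN
 //     630000 .. 839999  3          6.25  * COIN
 //
 // The shift is applied to the integer satoshi amount, so fractional coin
 // values stay exact; once halvings reaches 64 the subsidy is forced to zero
 // because the right shift would be undefined.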
 
 // Note that though this is marked const, we may end up modifying
 // `m_cached_finished_ibd`, which is a performance-related implementation
 // detail. This function must be marked `const` so that `CValidationInterface`
 // clients (which are given a `const CChainState*`) can call it.
 //
 bool CChainState::IsInitialBlockDownload() const {
     // Optimization: pre-test latch before taking the lock.
     if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
         return false;
     }
 
     LOCK(cs_main);
     if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
         return false;
     }
     if (fImporting || fReindex) {
         return true;
     }
     if (m_chain.Tip() == nullptr) {
         return true;
     }
     if (m_chain.Tip()->nChainWork < nMinimumChainWork) {
         return true;
     }
     if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
         return true;
     }
     LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
     m_cached_finished_ibd.store(true, std::memory_order_relaxed);
     return false;
 }
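
 // Illustrative note with a usage sketch: because the result latches to
 // false, callers can poll IsInitialBlockDownload() cheaply, e.g.:
 //
 //     if (::ChainstateActive().IsInitialBlockDownload()) {
 //         // Defer work that only makes sense on a synced node, such as the
 //         // fork warnings issued in CheckForkWarningConditions() below.
 //     }
 //
 // The first relaxed load of m_cached_finished_ibd skips cs_main entirely
 // once the node has left IBD; the second load, taken under cs_main,
 // re-checks the latch before evaluating the more expensive conditions.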
 
 static CBlockIndex const *pindexBestForkTip = nullptr;
 static CBlockIndex const *pindexBestForkBase = nullptr;
 
 BlockMap &BlockIndex() {
     return g_blockman.m_block_index;
 }
 
 static void AlertNotify(const std::string &strMessage) {
     uiInterface.NotifyAlertChanged();
 #if defined(HAVE_SYSTEM)
     std::string strCmd = gArgs.GetArg("-alertnotify", "");
     if (strCmd.empty()) {
         return;
     }
 
     // Alert text should be plain ascii coming from a trusted source, but to be
     // safe we first strip anything not in safeChars, then add single quotes
     // around the whole string before passing it to the shell:
     std::string singleQuote("'");
     std::string safeStatus = SanitizeString(strMessage);
     safeStatus = singleQuote + safeStatus + singleQuote;
     boost::replace_all(strCmd, "%s", safeStatus);
 
     std::thread t(runCommand, strCmd);
     // thread runs free
     t.detach();
 #endif
 }
 
 static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     // Before we get past initial download, we cannot reliably alert about forks
     // (we assume we don't get stuck on a fork before finishing our initial
     // sync)
     if (::ChainstateActive().IsInitialBlockDownload()) {
         return;
     }
 
     // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
     // mines it) of our head, drop it
     if (pindexBestForkTip &&
         ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72) {
         pindexBestForkTip = nullptr;
     }
 
     if (pindexBestForkTip ||
         (pindexBestInvalid &&
          pindexBestInvalid->nChainWork >
              ::ChainActive().Tip()->nChainWork +
                  (GetBlockProof(*::ChainActive().Tip()) * 6))) {
         if (!GetfLargeWorkForkFound() && pindexBestForkBase) {
             std::string warning =
                 std::string("'Warning: Large-work fork detected, forking after "
                             "block ") +
                 pindexBestForkBase->phashBlock->ToString() + std::string("'");
             AlertNotify(warning);
         }
 
         if (pindexBestForkTip && pindexBestForkBase) {
             LogPrintf("%s: Warning: Large fork found\n  forking the "
                       "chain at height %d (%s)\n  lasting to height %d "
                       "(%s).\nChain state database corruption likely.\n",
                       __func__, pindexBestForkBase->nHeight,
                       pindexBestForkBase->phashBlock->ToString(),
                       pindexBestForkTip->nHeight,
                       pindexBestForkTip->phashBlock->ToString());
             SetfLargeWorkForkFound(true);
         } else {
             LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
                       "longer than our best chain.\nChain state database "
                       "corruption likely.\n",
                       __func__);
             SetfLargeWorkInvalidChainFound(true);
         }
     } else {
         SetfLargeWorkForkFound(false);
         SetfLargeWorkInvalidChainFound(false);
     }
 }
 
 static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     // If we are on a fork that is sufficiently large, set a warning flag.
     const CBlockIndex *pfork = ::ChainActive().FindFork(pindexNewForkTip);
 
     // We define the condition that we should warn the user about as a fork
     // of at least 7 blocks with a tip within 72 blocks (+/- 12 hours if no
     // one mines it) of ours. We use 7 blocks rather arbitrarily as it
     // represents just under 10% of sustained network hash rate operating on
     // the fork, or a chain that is entirely longer than ours and invalid
     // (note that this should be detected by both). We define it this way
     // because it allows us to only store the highest fork tip (+ base) which
     // meets the 7-block condition and from this always have the
     // most-likely-to-cause-warning fork.
     if (pfork &&
         (!pindexBestForkTip ||
          pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
         pindexNewForkTip->nChainWork - pfork->nChainWork >
             (GetBlockProof(*pfork) * 7) &&
         ::ChainActive().Height() - pindexNewForkTip->nHeight < 72) {
         pindexBestForkTip = pindexNewForkTip;
         pindexBestForkBase = pfork;
     }
 
     CheckForkWarningConditions();
 }
 
 void CChainState::InvalidChainFound(CBlockIndex *pindexNew) {
     AssertLockHeld(cs_main);
     if (!pindexBestInvalid ||
         pindexNew->nChainWork > pindexBestInvalid->nChainWork) {
         pindexBestInvalid = pindexNew;
     }
 
     // If the invalid chain found is supposed to be finalized, we need to move
     // back the finalization point.
     if (IsBlockFinalized(pindexNew)) {
         m_finalizedBlockIndex = pindexNew->pprev;
     }
 
     LogPrintf("%s: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n",
               __func__, pindexNew->GetBlockHash().ToString(),
               pindexNew->nHeight,
               log(pindexNew->nChainWork.getdouble()) / log(2.0),
               FormatISO8601DateTime(pindexNew->GetBlockTime()));
     CBlockIndex *tip = ::ChainActive().Tip();
     assert(tip);
     LogPrintf("%s:  current best=%s  height=%d  log2_work=%.8g  date=%s\n",
               __func__, tip->GetBlockHash().ToString(),
               ::ChainActive().Height(),
               log(tip->nChainWork.getdouble()) / log(2.0),
               FormatISO8601DateTime(tip->GetBlockTime()));
 }
 
 void CChainState::InvalidBlockFound(CBlockIndex *pindex,
                                     const BlockValidationState &state) {
     if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
         pindex->nStatus = pindex->nStatus.withFailed();
         m_blockman.m_failed_blocks.insert(pindex);
         setDirtyBlockIndex.insert(pindex);
         InvalidChainFound(pindex);
     }
 }
 
 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight) {
     // Mark inputs spent.
     if (tx.IsCoinBase()) {
         return;
     }
 
     txundo.vprevout.reserve(tx.vin.size());
     for (const CTxIn &txin : tx.vin) {
         txundo.vprevout.emplace_back();
         bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
         assert(is_spent);
     }
 }
 
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                  int nHeight) {
     SpendCoins(view, tx, txundo, nHeight);
     AddCoins(view, tx, nHeight);
 }
 
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) {
     // Mark inputs spent.
     if (!tx.IsCoinBase()) {
         for (const CTxIn &txin : tx.vin) {
             bool is_spent = view.SpendCoin(txin.prevout);
             assert(is_spent);
         }
     }
 
     // Add outputs.
     AddCoins(view, tx, nHeight);
 }
 
 bool CScriptCheck::operator()() {
     const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
     if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
                       CachingTransactionSignatureChecker(
                           ptxTo, nIn, m_tx_out.nValue, cacheStore, txdata),
                       metrics, &error)) {
         return false;
     }
     if ((pTxLimitSigChecks &&
          !pTxLimitSigChecks->consume_and_check(metrics.nSigChecks)) ||
         (pBlockLimitSigChecks &&
          !pBlockLimitSigChecks->consume_and_check(metrics.nSigChecks))) {
         // we can't assign a meaningful script error (since the script
         // succeeded), but remove the ScriptError::OK which could be
         // misinterpreted.
         error = ScriptError::SIGCHECKS_LIMIT_EXCEEDED;
         return false;
     }
     return true;
 }
 
 int GetSpendHeight(const CCoinsViewCache &inputs) {
     LOCK(cs_main);
     CBlockIndex *pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
     return pindexPrev->nHeight + 1;
 }
 
 bool CheckInputs(const CTransaction &tx, TxValidationState &state,
                  const CCoinsViewCache &inputs, const uint32_t flags,
                  bool sigCacheStore, bool scriptCacheStore,
                  const PrecomputedTransactionData &txdata, int &nSigChecksOut,
                  TxSigCheckLimiter &txLimitSigChecks,
                  CheckInputsLimiter *pBlockLimitSigChecks,
                  std::vector<CScriptCheck> *pvChecks) {
     AssertLockHeld(cs_main);
     assert(!tx.IsCoinBase());
 
     if (pvChecks) {
         pvChecks->reserve(tx.vin.size());
     }
 
     // First check if script executions have been cached with the same flags.
     // Note that this assumes that the inputs provided are correct (ie that the
     // transaction hash which is in tx's prevouts properly commits to the
     // scriptPubKey in the inputs view of that transaction).
     ScriptCacheKey hashCacheEntry(tx, flags);
     if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
         if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
             (pBlockLimitSigChecks &&
              !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
             return state.Invalid(TxValidationResult::TX_CONSENSUS,
                                  REJECT_INVALID, "too-many-sigchecks");
         }
         return true;
     }
 
     int nSigChecksTotal = 0;
 
     for (size_t i = 0; i < tx.vin.size(); i++) {
         const COutPoint &prevout = tx.vin[i].prevout;
         const Coin &coin = inputs.AccessCoin(prevout);
         assert(!coin.IsSpent());
 
         // We very carefully only pass in things to CScriptCheck which are
         // clearly committed to by tx's hash. This provides a sanity
         // check that our caching is not introducing consensus failures through
         // additional data in, eg, the coins being spent being checked as a part
         // of CScriptCheck.
 
         // Verify signature
         CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
                            &txLimitSigChecks, pBlockLimitSigChecks);
         if (pvChecks) {
             pvChecks->push_back(std::move(check));
         } else if (!check()) {
             ScriptError scriptError = check.GetScriptError();
             // Compute flags without the optional standardness flags.
             // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
             // additional upgrade flags (see AcceptToMemoryPoolWorker variable
             // extraFlags).
             uint32_t mandatoryFlags =
                 flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS;
             if (flags != mandatoryFlags) {
                 // Check whether the failure was caused by a non-mandatory
                 // script verification check. If so, ensure we return
                 // NOT_STANDARD instead of CONSENSUS to avoid downstream users
                 // splitting the network between upgraded and non-upgraded nodes
                 // by banning CONSENSUS-failing data providers.
                 CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
                                     sigCacheStore, txdata);
                 if (check2()) {
                     return state.Invalid(
                         TxValidationResult::TX_NOT_STANDARD, REJECT_NONSTANDARD,
                         strprintf("non-mandatory-script-verify-flag (%s)",
                                   ScriptErrorString(scriptError)));
                 }
                 // update the error message to reflect the mandatory violation.
                 scriptError = check2.GetScriptError();
             }
 
             // MANDATORY flag failures correspond to
             // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures
             // are the most serious case of validation failures, we may need to
             // consider using RECENT_CONSENSUS_CHANGE for any script failure
             // that could be due to non-upgraded nodes which we may want to
             // support, to avoid splitting the network (but this depends on the
             // details of how net_processing handles such errors).
             return state.Invalid(
                 TxValidationResult::TX_CONSENSUS, REJECT_INVALID,
                 strprintf("mandatory-script-verify-flag-failed (%s)",
                           ScriptErrorString(scriptError)));
         }
 
         nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
     }
 
     nSigChecksOut = nSigChecksTotal;
 
     if (scriptCacheStore && !pvChecks) {
         // We executed all of the provided scripts, and were told to cache the
         // result. Do so now.
         AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
     }
 
     return true;
 }
 
 static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos,
                             const BlockHash &hashBlock,
                             const CMessageHeader::MessageMagic &messageStart) {
     // Open history file to append
     CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
     if (fileout.IsNull()) {
         return error("%s: OpenUndoFile failed", __func__);
     }
 
     // Write index header
     unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
     fileout << messageStart << nSize;
 
     // Write undo data
     long fileOutPos = ftell(fileout.Get());
     if (fileOutPos < 0) {
         return error("%s: ftell failed", __func__);
     }
     pos.nPos = (unsigned int)fileOutPos;
     fileout << blockundo;
 
     // calculate & write checksum
     CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
     hasher << hashBlock;
     hasher << blockundo;
     fileout << hasher.GetHash();
 
     return true;
 }
 
 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) {
     FlatFilePos pos = pindex->GetUndoPos();
     if (pos.IsNull()) {
         return error("%s: no undo data available", __func__);
     }
 
     // Open history file to read
     CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
     if (filein.IsNull()) {
         return error("%s: OpenUndoFile failed", __func__);
     }
 
     // Read block
     uint256 hashChecksum;
     // We need a CHashVerifier as reserializing may lose data
     CHashVerifier<CAutoFile> verifier(&filein);
     try {
         verifier << pindex->pprev->GetBlockHash();
         verifier >> blockundo;
         filein >> hashChecksum;
     } catch (const std::exception &e) {
         return error("%s: Deserialize or I/O error - %s", __func__, e.what());
     }
 
     // Verify checksum
     if (hashChecksum != verifier.GetHash()) {
         return error("%s: Checksum mismatch", __func__);
     }
 
     return true;
 }
 
 /** Abort with a message */
 static bool AbortNode(const std::string &strMessage,
                       const std::string &userMessage = "",
                       unsigned int prefix = 0) {
     SetMiscWarning(strMessage);
     LogPrintf("*** %s\n", strMessage);
     if (!userMessage.empty()) {
         uiInterface.ThreadSafeMessageBox(
             userMessage, "", CClientUIInterface::MSG_ERROR | prefix);
     } else {
         uiInterface.ThreadSafeMessageBox(
             _("Error: A fatal internal error occurred, see debug.log for "
               "details")
                 .translated,
             "",
             CClientUIInterface::MSG_ERROR | CClientUIInterface::MSG_NOPREFIX);
     }
     StartShutdown();
     return false;
 }
 
 static bool AbortNode(BlockValidationState &state,
                       const std::string &strMessage,
                       const std::string &userMessage = "",
                       unsigned int prefix = 0) {
     AbortNode(strMessage, userMessage, prefix);
     return state.Error(strMessage);
 }
 
 /** Restore the UTXO in a Coin at a given COutPoint. */
 DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view,
                                const COutPoint &out) {
     bool fClean = true;
 
     if (view.HaveCoin(out)) {
         // Overwriting transaction output.
         fClean = false;
     }
 
     if (undo.GetHeight() == 0) {
         // Missing undo metadata (height and coinbase). Older versions
         // included this information only in undo records for the last spend
         // of a transaction's outputs. This implies that it must be present
         // for some other output of the same tx.
         const Coin &alternate = AccessByTxid(view, out.GetTxId());
         if (alternate.IsSpent()) {
             // Adding output for transaction without known metadata
             return DisconnectResult::FAILED;
         }
 
         // This is somewhat ugly, but hopefully utility is limited. This is
         // only useful when working from legacy on-disk data. In any case,
         // putting the correct information in there doesn't hurt.
         const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                         alternate.IsCoinBase());
     }
 
     // The potential_overwrite parameter to AddCoin is only allowed to be false
     // if we know for sure that the coin did not already exist in the cache. As
     // we have queried for that above using HaveCoin, we don't need to guess.
     // When fClean is false, a coin already existed and it is an overwrite.
     view.AddCoin(out, std::move(undo), !fClean);
 
     return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
 }
 
 /**
  * Undo the effects of this block (with given index) on the UTXO set represented
  * by coins. When FAILED is returned, view is left in an indeterminate state.
  */
 DisconnectResult CChainState::DisconnectBlock(const CBlock &block,
                                               const CBlockIndex *pindex,
                                               CCoinsViewCache &view) {
     CBlockUndo blockUndo;
     if (!UndoReadFromDisk(blockUndo, pindex)) {
         error("DisconnectBlock(): failure reading undo data");
         return DisconnectResult::FAILED;
     }
 
     return ApplyBlockUndo(blockUndo, block, pindex, view);
 }
 
 DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo,
                                 const CBlock &block, const CBlockIndex *pindex,
                                 CCoinsViewCache &view) {
     bool fClean = true;
 
     if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
         error("DisconnectBlock(): block and undo data inconsistent");
         return DisconnectResult::FAILED;
     }
 
     // First, restore inputs.
     for (size_t i = 1; i < block.vtx.size(); i++) {
         const CTransaction &tx = *(block.vtx[i]);
         const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
         if (txundo.vprevout.size() != tx.vin.size()) {
             error("DisconnectBlock(): transaction and undo data inconsistent");
             return DisconnectResult::FAILED;
         }
 
         for (size_t j = 0; j < tx.vin.size(); j++) {
             const COutPoint &out = tx.vin[j].prevout;
             const Coin &undo = txundo.vprevout[j];
             DisconnectResult res = UndoCoinSpend(undo, view, out);
             if (res == DisconnectResult::FAILED) {
                 return DisconnectResult::FAILED;
             }
             fClean = fClean && res != DisconnectResult::UNCLEAN;
         }
     }
 
     // Second, revert created outputs.
     for (const auto &ptx : block.vtx) {
         const CTransaction &tx = *ptx;
         const TxId &txid = tx.GetId();
         const bool is_coinbase = tx.IsCoinBase();
 
         // Check that all outputs are available and match the outputs in the
         // block itself exactly.
         for (size_t o = 0; o < tx.vout.size(); o++) {
             if (tx.vout[o].scriptPubKey.IsUnspendable()) {
                 continue;
             }
 
             COutPoint out(txid, o);
             Coin coin;
             bool is_spent = view.SpendCoin(out, &coin);
             if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
                 uint32_t(pindex->nHeight) != coin.GetHeight() ||
                 is_coinbase != coin.IsCoinBase()) {
                 // transaction output mismatch
                 fClean = false;
             }
         }
     }
 
     // Move best block pointer to previous block.
     view.SetBestBlock(block.hashPrevBlock);
 
     return fClean ? DisconnectResult::OK : DisconnectResult::UNCLEAN;
 }
 
 static void FlushBlockFile(bool fFinalize = false) {
     LOCK(cs_LastBlockFile);
 
     FlatFilePos block_pos_old(nLastBlockFile,
                               vinfoBlockFile[nLastBlockFile].nSize);
     FlatFilePos undo_pos_old(nLastBlockFile,
                              vinfoBlockFile[nLastBlockFile].nUndoSize);
 
     bool status = true;
     status &= BlockFileSeq().Flush(block_pos_old, fFinalize);
     status &= UndoFileSeq().Flush(undo_pos_old, fFinalize);
     if (!status) {
         AbortNode("Flushing block file to disk failed. This is likely the "
                   "result of an I/O error.");
     }
 }
 
 static bool FindUndoPos(BlockValidationState &state, int nFile,
                         FlatFilePos &pos, unsigned int nAddSize);
 
 static bool WriteUndoDataForBlock(const CBlockUndo &blockundo,
                                   BlockValidationState &state,
                                   CBlockIndex *pindex,
                                   const CChainParams &chainparams) {
     // Write undo information to disk
     if (pindex->GetUndoPos().IsNull()) {
         FlatFilePos _pos;
         if (!FindUndoPos(state, pindex->nFile, _pos,
                          ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
             return error("ConnectBlock(): FindUndoPos failed");
         }
         if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
                              chainparams.DiskMagic())) {
             return AbortNode(state, "Failed to write undo data");
         }
 
         // update nUndoPos in block index
         pindex->nUndoPos = _pos.nPos;
         pindex->nStatus = pindex->nStatus.withUndo();
         setDirtyBlockIndex.insert(pindex);
     }
 
     return true;
 }
 
 static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
 
 void ThreadScriptCheck(int worker_num) {
     util::ThreadRename(strprintf("scriptch.%i", worker_num));
     scriptcheckqueue.Thread();
 }
 
 VersionBitsCache versionbitscache GUARDED_BY(cs_main);
 
 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
                             const Consensus::Params &params) {
     LOCK(cs_main);
     int32_t nVersion = VERSIONBITS_TOP_BITS;
 
     for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
         ThresholdState state = VersionBitsState(
             pindexPrev, params, static_cast<Consensus::DeploymentPos>(i),
             versionbitscache);
         if (state == ThresholdState::LOCKED_IN ||
             state == ThresholdState::STARTED) {
             nVersion |= VersionBitsMask(
                 params, static_cast<Consensus::DeploymentPos>(i));
         }
     }
 
     // Clear the last 4 bits (miner fund activation).
     return nVersion & ~uint32_t(0x0f);
 }
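
 // Illustrative example (hypothetical bit assignment): a deployment that is
 // STARTED on bit 5 contributes VersionBitsMask() == 0x00000020, so the
 // computed version would be
 //
 //     VERSIONBITS_TOP_BITS | 0x00000020 = 0x20000020
 //     0x20000020 & ~uint32_t(0x0f)      = 0x20000020
 //
 // whereas a deployment assigned to one of the low four bits would be masked
 // out again by the final `& ~uint32_t(0x0f)`, since those bits are reserved
 // for miner fund activation signaling.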
 
 // Returns the script flags which should be checked for the block after
 // the given block.
 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
                                         const CBlockIndex *pindex) {
     uint32_t flags = SCRIPT_VERIFY_NONE;
 
     // Start enforcing P2SH (BIP16)
     if ((pindex->nHeight + 1) >= params.BIP16Height) {
         flags |= SCRIPT_VERIFY_P2SH;
     }
 
     // Start enforcing the DERSIG (BIP66) rule.
     if ((pindex->nHeight + 1) >= params.BIP66Height) {
         flags |= SCRIPT_VERIFY_DERSIG;
     }
 
     // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
     if ((pindex->nHeight + 1) >= params.BIP65Height) {
         flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
     }
 
     // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
     if ((pindex->nHeight + 1) >= params.CSVHeight) {
         flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
     }
 
     // If the UAHF is enabled, we start accepting replay protected txns
     if (IsUAHFenabled(params, pindex)) {
         flags |= SCRIPT_VERIFY_STRICTENC;
         flags |= SCRIPT_ENABLE_SIGHASH_FORKID;
     }
 
     // If the DAA HF is enabled, we start rejecting transactions that use a
     // high S in their signatures. We also make sure that signatures that are
     // supposed to fail (for instance in multisig or other forms of smart
     // contracts) are null.
     if (IsDAAEnabled(params, pindex)) {
         flags |= SCRIPT_VERIFY_LOW_S;
         flags |= SCRIPT_VERIFY_NULLFAIL;
     }
 
     // When the magnetic anomaly fork is enabled, we start accepting
     // transactions using the OP_CHECKDATASIG opcode and its verify
     // alternative. We also start enforcing push-only signatures and
     // clean stack.
     if (IsMagneticAnomalyEnabled(params, pindex)) {
         flags |= SCRIPT_VERIFY_CHECKDATASIG_SIGOPS;
         flags |= SCRIPT_VERIFY_SIGPUSHONLY;
         flags |= SCRIPT_VERIFY_CLEANSTACK;
     }
 
     if (IsGravitonEnabled(params, pindex)) {
         flags |= SCRIPT_ENABLE_SCHNORR_MULTISIG;
         flags |= SCRIPT_VERIFY_MINIMALDATA;
     }
 
     if (IsPhononEnabled(params, pindex)) {
         flags |= SCRIPT_ENFORCE_SIGCHECKS;
     }
 
     // We make sure this node will have replay protection during the next hard
     // fork.
     if (IsReplayProtectionEnabled(params, pindex)) {
         flags |= SCRIPT_ENABLE_REPLAY_PROTECTION;
     }
 
     return flags;
 }
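
 // Illustrative usage sketch (mirroring the call in AcceptToMemoryPoolWorker()
 // above): obtaining the flags that will apply to the block built on the
 // current tip. `chainparams` is hypothetical here.
 //
 //     LOCK(cs_main);
 //     const uint32_t nextBlockFlags = GetNextBlockScriptFlags(
 //         chainparams.GetConsensus(), ::ChainActive().Tip());
 //
 // On a chain past all of the activations listed above, nextBlockFlags
 // accumulates SCRIPT_VERIFY_P2SH, SCRIPT_VERIFY_DERSIG,
 // SCRIPT_ENABLE_SIGHASH_FORKID and the other bits in turn.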
 
 static int64_t nTimeCheck = 0;
 static int64_t nTimeForks = 0;
 static int64_t nTimeVerify = 0;
 static int64_t nTimeConnect = 0;
 static int64_t nTimeIndex = 0;
 static int64_t nTimeCallbacks = 0;
 static int64_t nTimeTotal = 0;
 static int64_t nBlocksTotal = 0;
 
 /**
  * Apply the effects of this block (with given index) on the UTXO set
  * represented by coins. Validity checks that depend on the UTXO set are also
  * done; ConnectBlock() can fail if those validity checks fail (among other
  * reasons).
  */
 bool CChainState::ConnectBlock(const CBlock &block, BlockValidationState &state,
                                CBlockIndex *pindex, CCoinsViewCache &view,
                                const CChainParams &params,
                                BlockValidationOptions options,
                                bool fJustCheck) {
     AssertLockHeld(cs_main);
     assert(pindex);
     assert(*pindex->phashBlock == block.GetHash());
     int64_t nTimeStart = GetTimeMicros();
 
     const Consensus::Params &consensusParams = params.GetConsensus();
 
     // Check it again in case a previous version let a bad block in
     // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
     // ContextualCheckBlockHeader() here. This means that if we add a new
     // consensus rule that is enforced in one of those two functions, then we
     // may have let in a block that violates the rule prior to updating the
     // software, and we would NOT be enforcing the rule here. Fully solving
     // upgrade from one software version to the next after a consensus rule
     // change is potentially tricky and issue-specific.
     // Also, currently the rule against blocks more than 2 hours in the future
     // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
     // re-enforce that rule here (at least until we make it impossible for
     // GetAdjustedTime() to go backward).
     if (!CheckBlock(block, state, consensusParams,
                     options.withCheckPoW(!fJustCheck)
                         .withCheckMerkleRoot(!fJustCheck))) {
         if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
             // We don't write down blocks to disk if they may have been
             // corrupted, so this should be impossible unless we're having
             // hardware problems.
             return AbortNode(state, "Corrupt block found indicating potential "
                                     "hardware failure; shutting down");
         }
         return error("%s: Consensus::CheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
 
     // Verify that the view's current state corresponds to the previous block
     BlockHash hashPrevBlock =
         pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
     assert(hashPrevBlock == view.GetBestBlock());
 
     // Special case for the genesis block, skipping connection of its
     // transactions (its coinbase is unspendable)
     if (block.GetHash() == consensusParams.hashGenesisBlock) {
         if (!fJustCheck) {
             view.SetBestBlock(pindex->GetBlockHash());
         }
 
         return true;
     }
 
     nBlocksTotal++;
 
     bool fScriptChecks = true;
     if (!hashAssumeValid.IsNull()) {
         // We've been configured with the hash of a block which has been
         // externally verified to have a valid history. A suitable default value
         // is included with the software and updated from time to time. Because
         // validity relative to a piece of software is an objective fact, these
         // defaults can be easily reviewed. This setting doesn't force the
         // selection of any particular chain but makes validating some faster by
         // effectively caching the result of part of the verification.
         BlockMap::const_iterator it =
             m_blockman.m_block_index.find(hashAssumeValid);
         if (it != m_blockman.m_block_index.end()) {
             if (it->second->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->nChainWork >= nMinimumChainWork) {
                 // This block is a member of the assumed verified chain and an
                 // ancestor of the best header.
                 // Script verification is skipped when connecting blocks under
                 // the assumevalid block. Assuming the assumevalid block is
                 // valid, this is safe because block merkle hashes are still
                 // computed and checked. Of course, if an assumed valid block is
                 // invalid due to false scriptSigs, this optimization would allow
                 // an invalid chain to be accepted.
                 // The equivalent time check discourages hash power from
                 // extorting the network via a DoS attack into accepting an
                 // invalid block by telling users they must manually set
                 // assumevalid. Requiring a software change or burying the
                 // invalid block, regardless of the setting, makes it hard to
                 // hide the implication of the demand. This also avoids having
                 // release candidates that are hardly doing any signature
                 // verification at all in testing without having to artificially
                 // set the default assumed verified block further back. The test
                 // against nMinimumChainWork prevents the skipping when denied
                 // access to any chain at least as good as the expected chain.
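                 // (The 60 * 60 * 24 * 7 * 2 constant below is two weeks,
                 // expressed in seconds of proof-equivalent time.)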
                 fScriptChecks =
                     (GetBlockProofEquivalentTime(
                          *pindexBestHeader, *pindex, *pindexBestHeader,
                          consensusParams) <= 60 * 60 * 24 * 7 * 2);
             }
         }
     }
 
     int64_t nTime1 = GetTimeMicros();
     nTimeCheck += nTime1 - nTimeStart;
     LogPrint(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
              MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
              nTimeCheck * MILLI / nBlocksTotal);
 
     // Do not allow blocks that contain transactions which 'overwrite' older
     // transactions, unless those are already completely spent. If such
     // overwrites are allowed, coinbases and transactions depending upon those
     // can be duplicated to remove the ability to spend the first instance --
     // even after being sent to another address. See BIP30 and
     // http://r6.ca/blog/20120206T005236Z.html for more information. This logic
     // is not necessary for memory pool transactions, as AcceptToMemoryPool
     // already refuses previously-known transaction ids entirely. This rule was
     // originally applied to all blocks with a timestamp after March 15, 2012,
     // 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is
     // applied to all blocks except the two in the chain that violate it. This
     // prevents exploiting the issue against nodes during their initial block
     // download.
     bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
                             pindex->GetBlockHash() ==
                                 uint256S("0x00000000000a4d0a398161ffc163c503763"
                                          "b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight == 91880 &&
                             pindex->GetBlockHash() ==
                                 uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
                                          "610ae9601ac046a38084ccb7cd721")));
 
     // Once BIP34 activated it was not possible to create new duplicate
     // coinbases and thus other than starting with the 2 existing duplicate
     // coinbase pairs, not possible to create overwriting txs. But by the time
     // BIP34 activated, in each of the existing pairs the duplicate coinbase had
     // overwritten the first before the first had been spent. Since those
     // coinbases are sufficiently buried it's no longer possible to create
     // further duplicate transactions descending from the known pairs either. If
     // we're on the known chain at height greater than where BIP34 activated, we
     // can save the db accesses needed for the BIP30 check.
     assert(pindex->pprev);
     CBlockIndex *pindexBIP34height =
         pindex->pprev->GetAncestor(consensusParams.BIP34Height);
     // Only continue to enforce if we're below BIP34 activation height or the
     // block hash at that height doesn't correspond.
     fEnforceBIP30 =
         fEnforceBIP30 &&
         (!pindexBIP34height ||
          !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
 
     if (fEnforceBIP30) {
         for (const auto &tx : block.vtx) {
             for (size_t o = 0; o < tx->vout.size(); o++) {
                 if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
                     LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
                               "transaction\n");
                     return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                          REJECT_INVALID, "bad-txns-BIP30");
                 }
             }
         }
     }
 
     // Start enforcing BIP68 (sequence locks).
     int nLockTimeFlags = 0;
     if (pindex->nHeight >= consensusParams.CSVHeight) {
         nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
     }
 
     const uint32_t flags =
         GetNextBlockScriptFlags(consensusParams, pindex->pprev);
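     // (GetNextBlockScriptFlags above returns the flags that apply to the
     // block immediately after the one passed in, so calling it with
     // pindex->pprev yields the flags for this block.)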
 
     int64_t nTime2 = GetTimeMicros();
     nTimeForks += nTime2 - nTime1;
     LogPrint(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
              MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
              nTimeForks * MILLI / nBlocksTotal);
 
     std::vector<int> prevheights;
     Amount nFees = Amount::zero();
     int nInputs = 0;
 
     // Limit the total executed signature operations in the block, a consensus
     // rule. Tracking during the CPU-consuming part (validation of uncached
     // inputs) is per-input atomic and validation in each thread stops very
     // quickly after the limit is exceeded, so an adversary cannot cause us to
     // exceed the limit by much at all.
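     // This block-wide limiter is shared by every script-checking thread;
     // each transaction additionally gets its own TxSigCheckLimiter below.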
     CheckInputsLimiter nSigChecksBlockLimiter(
         GetMaxBlockSigChecksCount(options.getExcessiveBlockSize()));
 
     std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
     nSigChecksTxLimiters.resize(block.vtx.size() - 1);
 
     CBlockUndo blockundo;
     blockundo.vtxundo.resize(block.vtx.size() - 1);
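     // Note: both nSigChecksTxLimiters and blockundo.vtxundo above are sized
     // vtx.size() - 1 because the coinbase is skipped below; it spends no
     // coins, so it produces no undo data and is not subject to a
     // per-transaction sigcheck limit.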
 
     CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
                                                            : nullptr);
 
     // Add all outputs
     try {
         for (const auto &ptx : block.vtx) {
             AddCoins(view, *ptx, pindex->nHeight);
         }
     } catch (const std::logic_error &e) {
         // This error will be thrown from AddCoin if we try to connect a block
         // containing duplicate transactions. Such a thing should normally be
         // caught early nowadays (due to ContextualCheckBlock's CTOR
         // enforcement); however, some edge cases can escape that:
         // - ContextualCheckBlock does not get re-run after saving the block to
         // disk, and older versions may have saved a weird block.
         // - its checks are not applied to pre-CTOR chains, which we might visit
         // with checkpointing off.
         LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "tx-duplicate");
     }
 
     size_t txIndex = 0;
     for (const auto &ptx : block.vtx) {
         const CTransaction &tx = *ptx;
         const bool isCoinBase = tx.IsCoinBase();
         nInputs += tx.vin.size();
 
         {
             Amount txfee = Amount::zero();
             TxValidationState tx_state;
             if (!isCoinBase &&
                 !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
                                           txfee)) {
                 // Any transaction validation failure in ConnectBlock is a block
                 // consensus failure.
                 state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                               tx_state.GetRejectCode(),
                               tx_state.GetRejectReason(),
                               tx_state.GetDebugMessage());
 
                 return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
                              tx.GetId().ToString(), FormatStateMessage(state));
             }
             nFees += txfee;
         }
 
         if (!MoneyRange(nFees)) {
             LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
                       __func__);
             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                  REJECT_INVALID,
                                  "bad-txns-accumulated-fee-outofrange");
         }
 
         // The following checks do not apply to the coinbase.
         if (isCoinBase) {
             continue;
         }
 
         // Check that the transaction is BIP68-final. BIP68 lock checks (as
         // opposed to nLockTime checks) must be in ConnectBlock because they
         // require the UTXO set.
         prevheights.resize(tx.vin.size());
         for (size_t j = 0; j < tx.vin.size(); j++) {
             prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
         }
 
         if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
             LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
                       __func__);
             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                  REJECT_INVALID, "bad-txns-nonfinal");
         }
 
         // Don't cache results if we're actually connecting blocks (still
         // consult the cache, though).
         bool fCacheResults = fJustCheck;
 
         const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
         if (!fEnforceSigCheck) {
             // Historically, there have been transactions with a very high
             // sigcheck count, so we need to disable this check for such
             // transactions.
             nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
         }
 
         std::vector<CScriptCheck> vChecks;
         // nSigChecksRet may be accurate (found in cache) or 0 (checks were
         // deferred into vChecks).
         int nSigChecksRet;
         TxValidationState tx_state;
         if (fScriptChecks &&
             !CheckInputs(tx, tx_state, view, flags, fCacheResults,
                          fCacheResults, PrecomputedTransactionData(tx),
                          nSigChecksRet, nSigChecksTxLimiters[txIndex],
                          &nSigChecksBlockLimiter, &vChecks)) {
             // Any transaction validation failure in ConnectBlock is a block
             // consensus failure
             state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                           tx_state.GetRejectCode(), tx_state.GetRejectReason(),
                           tx_state.GetDebugMessage());
             return error("ConnectBlock(): CheckInputs on %s failed with %s",
                          tx.GetId().ToString(), FormatStateMessage(state));
         }
 
         control.Add(vChecks);
 
         // Note: this must execute in the same iteration as CheckTxInputs (not
         // in a separate loop) in order to detect double spends. However,
         // this does not prevent double-spending by duplicated transaction
         // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
         // done in CheckBlock (CheckRegularTransaction).
         SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
         txIndex++;
     }
 
     int64_t nTime3 = GetTimeMicros();
     nTimeConnect += nTime3 - nTime2;
     LogPrint(BCLog::BENCH,
              "      - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
              "[%.2fs (%.2fms/blk)]\n",
              (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
              MILLI * (nTime3 - nTime2) / block.vtx.size(),
              nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
              nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
 
     Amount blockReward =
         nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
     if (block.vtx[0]->GetValueOut() > blockReward) {
         LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
                   "limit=%d)\n",
                   block.vtx[0]->GetValueOut(), blockReward);
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "bad-cb-amount");
     }
 
     const std::vector<CTxDestination> whitelist =
         GetMinerFundWhitelist(consensusParams, pindex->pprev);
     if (!whitelist.empty()) {
         const Amount required = GetMinerFundAmount(blockReward);
 
         for (auto &o : block.vtx[0]->vout) {
             if (o.nValue < required) {
                 // This output doesn't qualify because its amount is too low.
                 continue;
             }
 
             CTxDestination address;
             if (!ExtractDestination(o.scriptPubKey, address)) {
                 // Cannot decode address.
                 continue;
             }
 
             if (std::find(whitelist.begin(), whitelist.end(), address) !=
                 whitelist.end()) {
                 goto MinerFundSuccess;
             }
         }
 
         // We did not find an output that matches the miner fund requirements.
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "bad-cb-minerfund");
     }
 
 MinerFundSuccess:
 
     if (!control.Wait()) {
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "blk-bad-inputs",
                              "parallel script check failed");
     }
 
     int64_t nTime4 = GetTimeMicros();
     nTimeVerify += nTime4 - nTime2;
     LogPrint(
         BCLog::BENCH,
         "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
         nInputs - 1, MILLI * (nTime4 - nTime2),
         nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
         nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
 
     if (fJustCheck) {
         return true;
     }
 
     if (!WriteUndoDataForBlock(blockundo, state, pindex, params)) {
         return false;
     }
 
     if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
         pindex->RaiseValidity(BlockValidity::SCRIPTS);
         setDirtyBlockIndex.insert(pindex);
     }
 
     assert(pindex->phashBlock);
     // add this block to the view's block chain
     view.SetBestBlock(pindex->GetBlockHash());
 
     int64_t nTime5 = GetTimeMicros();
     nTimeIndex += nTime5 - nTime4;
     LogPrint(BCLog::BENCH, "    - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
              MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
              nTimeIndex * MILLI / nBlocksTotal);
 
     int64_t nTime6 = GetTimeMicros();
     nTimeCallbacks += nTime6 - nTime5;
     LogPrint(BCLog::BENCH, "    - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n",
              MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO,
              nTimeCallbacks * MILLI / nBlocksTotal);
 
     return true;
 }
 
 bool CChainState::FlushStateToDisk(const CChainParams &chainparams,
                                    BlockValidationState &state,
                                    FlushStateMode mode,
                                    int nManualPruneHeight) {
     int64_t nMempoolUsage = g_mempool.DynamicMemoryUsage();
     LOCK(cs_main);
     static int64_t nLastWrite = 0;
     static int64_t nLastFlush = 0;
     std::set<int> setFilesToPrune;
     bool full_flush_completed = false;
     try {
         {
             bool fFlushForPrune = false;
             bool fDoFullFlush = false;
             LOCK(cs_LastBlockFile);
             if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
                 !fReindex) {
                 if (nManualPruneHeight > 0) {
                     FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
                 } else {
                     FindFilesToPrune(setFilesToPrune,
                                      chainparams.PruneAfterHeight());
                     fCheckForPruning = false;
                 }
                 if (!setFilesToPrune.empty()) {
                     fFlushForPrune = true;
                     if (!fHavePruned) {
                         pblocktree->WriteFlag("prunedblockfiles", true);
                         fHavePruned = true;
                     }
                 }
             }
             int64_t nNow = GetTimeMicros();
             // Avoid writing/flushing immediately after startup.
             if (nLastWrite == 0) {
                 nLastWrite = nNow;
             }
             if (nLastFlush == 0) {
                 nLastFlush = nNow;
             }
             int64_t nMempoolSizeMax =
                 gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
             int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
             int64_t nTotalSpace =
                 nCoinCacheUsage +
                 std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
             // The cache is large and we're within 10% and 10 MiB of the limit,
             // but we have time now (not in the middle of a block processing).
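             // For illustration, taking the 10 MiB figure from the comment
             // above: with a hypothetical 400 MiB budget, the size condition
             // here is met once the cache exceeds
             // max(0.9 * 400, 400 - 10) = 390 MiB.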
             bool fCacheLarge =
                 mode == FlushStateMode::PERIODIC &&
                 cacheSize > std::max((9 * nTotalSpace) / 10,
                                      nTotalSpace -
                                          MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
             // The cache is over the limit, we have to write now.
             bool fCacheCritical =
                 mode == FlushStateMode::IF_NEEDED && cacheSize > nTotalSpace;
             // It's been a while since we wrote the block index to disk. Do this
             // frequently, so we don't need to redownload after a crash.
             bool fPeriodicWrite =
                 mode == FlushStateMode::PERIODIC &&
                 nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
             // It's been very long since we flushed the cache. Do this
             // infrequently, to optimize cache usage.
             bool fPeriodicFlush =
                 mode == FlushStateMode::PERIODIC &&
                 nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
             // Combine all conditions that result in a full cache flush.
             fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
                            fCacheCritical || fPeriodicFlush || fFlushForPrune;
             // Write blocks and block index to disk.
             if (fDoFullFlush || fPeriodicWrite) {
                 // Depend on nMinDiskSpace to ensure we can write block index
                 if (!CheckDiskSpace(GetBlocksDir())) {
                     return AbortNode(
                         state, "Disk space is too low!",
                         _("Error: Disk space is too low!").translated,
                         CClientUIInterface::MSG_NOPREFIX);
                 }
 
                 // First make sure all block and undo data is flushed to disk.
                 FlushBlockFile();
                 // Then update all block file information (which may refer to
                 // block and undo files).
                 {
                     std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
                     vFiles.reserve(setDirtyFileInfo.size());
                     for (int i : setDirtyFileInfo) {
                         vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i]));
                     }
 
                     setDirtyFileInfo.clear();
 
                     std::vector<const CBlockIndex *> vBlocks;
                     vBlocks.reserve(setDirtyBlockIndex.size());
                     for (const CBlockIndex *cbi : setDirtyBlockIndex) {
                         vBlocks.push_back(cbi);
                     }
 
                     setDirtyBlockIndex.clear();
 
                     if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
                                                     vBlocks)) {
                         return AbortNode(
                             state, "Failed to write to block index database");
                     }
                 }
 
                 // Finally remove any pruned files
                 if (fFlushForPrune) {
                     UnlinkPrunedFiles(setFilesToPrune);
                 }
                 nLastWrite = nNow;
             }
             // Flush best chain related state. This can only be done if the
             // blocks / block index write was also done.
             if (fDoFullFlush && !pcoinsTip->GetBestBlock().IsNull()) {
                 // Typical Coin structures on disk are around 48 bytes in size.
                 // Pushing a new one to the database can cause it to be written
                 // twice (once in the log, and once in the tables). This is
                 // already an overestimation, as most will delete an existing
                 // entry or overwrite one. Still, use a conservative safety
                 // factor of 2.
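                 // For illustration: a cache of one million coins requires
                 // roughly 48 * 2 * 2 * 1,000,000 bytes, i.e. about 192 MB of
                 // free disk space.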
                 if (!CheckDiskSpace(GetDataDir(),
                                     48 * 2 * 2 * pcoinsTip->GetCacheSize())) {
                     return AbortNode(
                         state, "Disk space is too low!",
                         _("Error: Disk space is too low!").translated,
                         CClientUIInterface::MSG_NOPREFIX);
                 }
 
                 // Flush the chainstate (which may refer to block index
                 // entries).
                 if (!pcoinsTip->Flush()) {
                     return AbortNode(state, "Failed to write to coin database");
                 }
                 nLastFlush = nNow;
                 full_flush_completed = true;
             }
         }
 
         if (full_flush_completed) {
             // Update best block in wallet (so we can detect restored wallets).
             GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
         }
     } catch (const std::runtime_error &e) {
         return AbortNode(state, std::string("System error while flushing: ") +
                                     e.what());
     }
     return true;
 }
 
 void CChainState::ForceFlushStateToDisk() {
     BlockValidationState state;
     const CChainParams &chainparams = Params();
     if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
         LogPrintf("%s: failed to flush state (%s)\n", __func__,
                   FormatStateMessage(state));
     }
 }
 
 void CChainState::PruneAndFlush() {
     BlockValidationState state;
     fCheckForPruning = true;
     const CChainParams &chainparams = Params();
     if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
         LogPrintf("%s: failed to flush state (%s)\n", __func__,
                   FormatStateMessage(state));
     }
 }
 
 /** Check warning conditions and do some notifications on new chain tip set. */
 static void UpdateTip(const CChainParams &params, CBlockIndex *pindexNew) {
     // New best block
     g_mempool.AddTransactionsUpdated(1);
 
     {
         LOCK(g_best_block_mutex);
         g_best_block = pindexNew->GetBlockHash();
         g_best_block_cv.notify_all();
     }
 
     LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%ld "
               "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
               __func__, pindexNew->GetBlockHash().ToString(),
               pindexNew->nHeight, pindexNew->nVersion,
               log(pindexNew->nChainWork.getdouble()) / log(2.0),
               pindexNew->GetChainTxCount(),
               FormatISO8601DateTime(pindexNew->GetBlockTime()),
               GuessVerificationProgress(params.TxData(), pindexNew),
               pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)),
               pcoinsTip->GetCacheSize());
 }
 
 /**
  * Disconnect m_chain's tip.
  * After calling, the mempool will be in an inconsistent state, with
  * transactions from disconnected blocks being added to disconnectpool. You
  * should make the mempool consistent again by calling updateMempoolForReorg.
  * with cs_main held.
  *
  * If disconnectpool is nullptr, then no disconnected transactions are added to
  * disconnectpool (note that the caller is responsible for mempool consistency
  * in any case).
  */
 bool CChainState::DisconnectTip(const CChainParams &params,
                                 BlockValidationState &state,
                                 DisconnectedBlockTransactions *disconnectpool) {
     AssertLockHeld(cs_main);
     CBlockIndex *pindexDelete = m_chain.Tip();
     const Consensus::Params &consensusParams = params.GetConsensus();
 
     assert(pindexDelete);
 
     // Read block from disk.
     std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
     CBlock &block = *pblock;
     if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
         return error("DisconnectTip(): Failed to read block");
     }
 
     // Apply the block atomically to the chain state.
     int64_t nStart = GetTimeMicros();
     {
         CCoinsViewCache view(pcoinsTip.get());
         assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
         if (DisconnectBlock(block, pindexDelete, view) !=
             DisconnectResult::OK) {
             return error("DisconnectTip(): DisconnectBlock %s failed",
                          pindexDelete->GetBlockHash().ToString());
         }
 
         bool flushed = view.Flush();
         assert(flushed);
     }
 
     LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
              (GetTimeMicros() - nStart) * MILLI);
 
     // Write the chain state to disk, if necessary.
     if (!FlushStateToDisk(params, state, FlushStateMode::IF_NEEDED)) {
         return false;
     }
 
     // If this block is deactivating a fork, we move all mempool transactions
     // in front of disconnectpool for reprocessing in a future
     // updateMempoolForReorg call
     if (pindexDelete->pprev != nullptr &&
         GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
             GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
         LogPrint(BCLog::MEMPOOL,
                  "Disconnecting mempool due to rewind of upgrade block\n");
         if (disconnectpool) {
             disconnectpool->importMempool(g_mempool);
         }
         g_mempool.clear();
     }
 
     if (disconnectpool) {
         disconnectpool->addForBlock(block.vtx, g_mempool);
     }
 
     // If the tip is finalized, then undo it.
     if (m_finalizedBlockIndex == pindexDelete) {
         m_finalizedBlockIndex = pindexDelete->pprev;
     }
 
     m_chain.SetTip(pindexDelete->pprev);
 
     // Update ::ChainActive() and related variables.
     UpdateTip(params, pindexDelete->pprev);
     // Let wallets know transactions went from 1-confirmed to
     // 0-confirmed or conflicted:
     GetMainSignals().BlockDisconnected(pblock, pindexDelete);
     return true;
 }
 
 static int64_t nTimeReadFromDisk = 0;
 static int64_t nTimeConnectTotal = 0;
 static int64_t nTimeFlush = 0;
 static int64_t nTimeChainState = 0;
 static int64_t nTimePostConnect = 0;
 
 struct PerBlockConnectTrace {
     CBlockIndex *pindex = nullptr;
     std::shared_ptr<const CBlock> pblock;
     std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
     PerBlockConnectTrace()
         : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
 };
 
 /**
  * Used to track blocks whose transactions were applied to the UTXO state as a
  * part of a single ActivateBestChainStep call.
  *
  * This class also tracks transactions that are removed from the mempool as
  * conflicts (per block) and can be used to pass all those transactions through
  * SyncTransaction.
  *
  * This class assumes (and asserts) that the conflicted transactions for a given
  * block are added via mempool callbacks prior to the BlockConnected()
  * associated with those transactions. If any transactions are marked
  * conflicted, it is assumed that an associated block will always be added.
  *
  * This class is single-use; once you call GetBlocksConnected() you have to
  * throw it away and make a new one.
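  *
  * Usage sketch: ActivateBestChain() constructs a ConnectTrace while holding
  * cs_main and the mempool lock, hands it to ActivateBestChainStep() and
  * ConnectTip(), and finally calls GetBlocksConnected() once to fire the
  * BlockConnected notifications.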
  */
 class ConnectTrace {
 private:
     std::vector<PerBlockConnectTrace> blocksConnected;
     CTxMemPool &pool;
     boost::signals2::scoped_connection m_connNotifyEntryRemoved;
 
 public:
     explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
         m_connNotifyEntryRemoved = pool.NotifyEntryRemoved.connect(
             std::bind(&ConnectTrace::NotifyEntryRemoved, this,
                       std::placeholders::_1, std::placeholders::_2));
     }
 
     void BlockConnected(CBlockIndex *pindex,
                         std::shared_ptr<const CBlock> pblock) {
         assert(!blocksConnected.back().pindex);
         assert(pindex);
         assert(pblock);
         blocksConnected.back().pindex = pindex;
         blocksConnected.back().pblock = std::move(pblock);
         blocksConnected.emplace_back();
     }
 
     std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
         // We always keep one extra block at the end of our list because blocks
         // are added after all the conflicted transactions have been filled in.
         // Thus, the last entry should always be an empty one waiting for the
         // transactions from the next block. We pop the last entry here to make
         // sure the list we return is sane.
         assert(!blocksConnected.back().pindex);
         assert(blocksConnected.back().conflictedTxs->empty());
         blocksConnected.pop_back();
         return blocksConnected;
     }
 
     void NotifyEntryRemoved(CTransactionRef txRemoved,
                             MemPoolRemovalReason reason) {
         assert(!blocksConnected.back().pindex);
         if (reason == MemPoolRemovalReason::CONFLICT) {
             blocksConnected.back().conflictedTxs->emplace_back(
                 std::move(txRemoved));
         }
     }
 };
 
 bool CChainState::MarkBlockAsFinal(const Config &config,
                                    BlockValidationState &state,
                                    const CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
     if (pindex->nStatus.isInvalid()) {
         // We try to finalize an invalid block.
         LogPrintf("ERROR: %s: Trying to finalize invalid block %s\n", __func__,
                   pindex->GetBlockHash().ToString());
         return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID,
                              REJECT_INVALID, "finalize-invalid-block");
     }
 
     // Check that the request is consistent with current finalization.
     if (m_finalizedBlockIndex &&
         !AreOnTheSameFork(pindex, m_finalizedBlockIndex)) {
         LogPrintf("ERROR: %s: Trying to finalize block %s which conflicts with "
                   "already finalized block\n",
                   __func__, pindex->GetBlockHash().ToString());
         return state.Invalid(BlockValidationResult::BLOCK_FINALIZATION,
                              REJECT_AGAINST_FINALIZED,
                              "bad-fork-prior-finalized");
     }
 
     if (IsBlockFinalized(pindex)) {
         // The block is already finalized.
         return true;
     }
 
     // We have a new block to finalize.
     m_finalizedBlockIndex = pindex;
     return true;
 }
 
 static const CBlockIndex *FindBlockToFinalize(const Config &config,
                                               CBlockIndex *pindexNew)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
 
     const int32_t maxreorgdepth =
         gArgs.GetArg("-maxreorgdepth", DEFAULT_MAX_REORG_DEPTH);
 
     const int64_t finalizationdelay =
         gArgs.GetArg("-finalizationdelay", DEFAULT_MIN_FINALIZATION_DELAY);
 
     // Find our candidate.
     // If maxreorgdepth is < 0, pindex will be null and auto finalization
     // is disabled.
     const CBlockIndex *pindex =
         pindexNew->GetAncestor(pindexNew->nHeight - maxreorgdepth);
 
     int64_t now = GetTime();
 
     // If the finalization delay has not expired since the startup time,
     // finalization should be avoided. Header receive time is not saved to
     // disk and so cannot be earlier than the startup time.
     if (now < (GetStartupTime() + finalizationdelay)) {
         return nullptr;
     }
 
     // While our candidate is not eligible (finalization delay not expired), try
     // the previous one.
     while (pindex && (pindex != ::ChainstateActive().GetFinalizedBlock())) {
         // Check that the block to finalize is known for a long enough time.
         // This test ensures that an attacker cannot cause a block to be
         // finalized by forking the chain with a depth > maxreorgdepth.
         // If the block is loaded from disk, header receive time is 0 and the
         // block will be finalized. This is safe because the delay since the
         // node startup is already expired.
         auto headerReceivedTime = pindex->GetHeaderReceivedTime();
 
         // If finalization delay is <= 0, finalization always occurs immediately
         if (now >= (headerReceivedTime + finalizationdelay)) {
             return pindex;
         }
 
         pindex = pindex->pprev;
     }
 
     return nullptr;
 }
 
 /**
  * Connect a new block to m_chain. pblock is either nullptr or a pointer to
  * a CBlock corresponding to pindexNew, to bypass loading it again from disk.
  *
  * The block is always added to connectTrace (either after loading from disk or
  * by copying pblock) - if that is not intended, care must be taken to remove
  * the last entry in blocksConnected in case of failure.
  */
 bool CChainState::ConnectTip(const Config &config, BlockValidationState &state,
                              CBlockIndex *pindexNew,
                              const std::shared_ptr<const CBlock> &pblock,
                              ConnectTrace &connectTrace,
                              DisconnectedBlockTransactions &disconnectpool) {
     AssertLockHeld(cs_main);
     AssertLockHeld(g_mempool.cs);
 
     const CChainParams &params = config.GetChainParams();
     const Consensus::Params &consensusParams = params.GetConsensus();
 
     assert(pindexNew->pprev == m_chain.Tip());
     // Read block from disk.
     int64_t nTime1 = GetTimeMicros();
     std::shared_ptr<const CBlock> pthisBlock;
     if (!pblock) {
         std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
         if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
             return AbortNode(state, "Failed to read block");
         }
         pthisBlock = pblockNew;
     } else {
         pthisBlock = pblock;
     }
 
     const CBlock &blockConnecting = *pthisBlock;
 
     // Apply the block atomically to the chain state.
     int64_t nTime2 = GetTimeMicros();
     nTimeReadFromDisk += nTime2 - nTime1;
     int64_t nTime3;
     LogPrint(BCLog::BENCH, "  - Load block from disk: %.2fms [%.2fs]\n",
              (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
     {
         CCoinsViewCache view(pcoinsTip.get());
         bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, params,
                                BlockValidationOptions(config));
         GetMainSignals().BlockChecked(blockConnecting, state);
         if (!rv) {
             if (state.IsInvalid()) {
                 InvalidBlockFound(pindexNew, state);
             }
 
             return error("%s: ConnectBlock %s failed, %s", __func__,
                          pindexNew->GetBlockHash().ToString(),
                          FormatStateMessage(state));
         }
 
         // Update the finalized block.
         const CBlockIndex *pindexToFinalize =
             FindBlockToFinalize(config, pindexNew);
         if (pindexToFinalize &&
             !MarkBlockAsFinal(config, state, pindexToFinalize)) {
             return error("ConnectTip(): MarkBlockAsFinal %s failed (%s)",
                          pindexNew->GetBlockHash().ToString(),
                          FormatStateMessage(state));
         }
 
         nTime3 = GetTimeMicros();
         nTimeConnectTotal += nTime3 - nTime2;
         LogPrint(BCLog::BENCH,
                  "  - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
                  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
                  nTimeConnectTotal * MILLI / nBlocksTotal);
         bool flushed = view.Flush();
         assert(flushed);
     }
 
     int64_t nTime4 = GetTimeMicros();
     nTimeFlush += nTime4 - nTime3;
     LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
              (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
              nTimeFlush * MILLI / nBlocksTotal);
 
     // Write the chain state to disk, if necessary.
     if (!FlushStateToDisk(config.GetChainParams(), state,
                           FlushStateMode::IF_NEEDED)) {
         return false;
     }
 
     int64_t nTime5 = GetTimeMicros();
     nTimeChainState += nTime5 - nTime4;
     LogPrint(BCLog::BENCH,
              "  - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
              (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
              nTimeChainState * MILLI / nBlocksTotal);
 
     // Remove conflicting transactions from the mempool.
     g_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
     disconnectpool.removeForBlock(blockConnecting.vtx);
 
     // If this block is activating a fork, we move all mempool transactions
     // in front of disconnectpool for reprocessing in a future
     // updateMempoolForReorg call
     if (pindexNew->pprev != nullptr &&
         GetNextBlockScriptFlags(consensusParams, pindexNew) !=
             GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
         LogPrint(BCLog::MEMPOOL,
                  "Disconnecting mempool due to acceptance of upgrade block\n");
         disconnectpool.importMempool(g_mempool);
     }
 
     // Update m_chain & related variables.
     m_chain.SetTip(pindexNew);
     UpdateTip(params, pindexNew);
 
     int64_t nTime6 = GetTimeMicros();
     nTimePostConnect += nTime6 - nTime5;
     nTimeTotal += nTime6 - nTime1;
     LogPrint(BCLog::BENCH,
              "  - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
              (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
              nTimePostConnect * MILLI / nBlocksTotal);
     LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
              (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
              nTimeTotal * MILLI / nBlocksTotal);
 
     connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
     return true;
 }
 
 /**
  * Return the tip of the chain with the most work in it, that isn't known to be
  * invalid (it's however far from certain to be valid).
  */
 CBlockIndex *CChainState::FindMostWorkChain() {
     AssertLockHeld(cs_main);
     do {
         CBlockIndex *pindexNew = nullptr;
 
         // Find the best candidate header.
         {
             std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
                 it = setBlockIndexCandidates.rbegin();
             if (it == setBlockIndexCandidates.rend()) {
                 return nullptr;
             }
             pindexNew = *it;
         }
 
         // If this block will cause a finalized block to be reorged, then we
         // mark it as invalid.
         if (m_finalizedBlockIndex &&
             !AreOnTheSameFork(pindexNew, m_finalizedBlockIndex)) {
             LogPrintf("Mark block %s invalid because it forks prior to the "
                       "finalization point %d.\n",
                       pindexNew->GetBlockHash().ToString(),
                       m_finalizedBlockIndex->nHeight);
             pindexNew->nStatus = pindexNew->nStatus.withFailed();
             InvalidChainFound(pindexNew);
         }
 
         const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
 
         // Check whether all blocks on the path between the currently active
         // chain and the candidate are valid. Just going until the active chain
         // is an optimization, as we know all blocks in it are valid already.
         CBlockIndex *pindexTest = pindexNew;
         bool hasValidAncestor = true;
         while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
             assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
 
             // If this is a parked chain, but it has enough PoW, clear the park
             // state.
             bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
             if (fParkedChain && gArgs.GetBoolArg("-automaticunparking", true)) {
                 const CBlockIndex *pindexTip = m_chain.Tip();
 
                 // During initialization, pindexTip and/or pindexFork may be
                 // null. In this case, we just ignore the fact that the chain is
                 // parked.
                 if (!pindexTip || !pindexFork) {
                     UnparkBlock(pindexTest);
                     continue;
                 }
 
                 // A parked chain can be unparked if it has twice as much PoW
                 // accumulated as the main chain has since the fork block.
                 CBlockIndex const *pindexExtraPow = pindexTip;
                 arith_uint256 requiredWork = pindexTip->nChainWork;
                 switch (pindexTip->nHeight - pindexFork->nHeight) {
                     // Limit the penalty for depths 1, 2 and 3 to half a
                     // block's worth of work to ensure we don't fork
                     // accidentally.
                     case 3:
                     case 2:
                         pindexExtraPow = pindexExtraPow->pprev;
                     // FALLTHROUGH
                     case 1: {
                         const arith_uint256 deltaWork =
                             pindexExtraPow->nChainWork - pindexFork->nChainWork;
                         requiredWork += (deltaWork >> 1);
                         break;
                     }
                     default:
                         requiredWork +=
                             pindexExtraPow->nChainWork - pindexFork->nChainWork;
                         break;
                 }
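                 // For illustration: for forks deeper than 3 blocks this
                 // works out to requiredWork = pindexFork->nChainWork +
                 //     2 * (pindexTip->nChainWork - pindexFork->nChainWork),
                 // i.e. the parked chain must have accumulated more than
                 // twice as much work since the fork as the active chain has.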
 
                 if (pindexNew->nChainWork > requiredWork) {
                     // We have enough, clear the parked state.
                     LogPrintf("Unpark chain up to block %s as it has "
                               "accumulated enough PoW.\n",
                               pindexNew->GetBlockHash().ToString());
                     fParkedChain = false;
                     UnparkBlock(pindexTest);
                 }
             }
 
             // Pruned nodes may have entries in setBlockIndexCandidates for
             // which block files have been deleted. Remove those as candidates
             // for the most work chain if we come across them; we can't switch
             // to a chain unless we have all the non-active-chain parent blocks.
             bool fInvalidChain = pindexTest->nStatus.isInvalid();
             bool fMissingData = !pindexTest->nStatus.hasData();
             if (!(fInvalidChain || fParkedChain || fMissingData)) {
                 // The current block is acceptable, move to the parent, up to
                 // the fork point.
                 pindexTest = pindexTest->pprev;
                 continue;
             }
 
             // Candidate chain is not usable (either invalid or parked or
             // missing data)
             hasValidAncestor = false;
             setBlockIndexCandidates.erase(pindexTest);
 
             if (fInvalidChain &&
                 (pindexBestInvalid == nullptr ||
                  pindexNew->nChainWork > pindexBestInvalid->nChainWork)) {
                 pindexBestInvalid = pindexNew;
             }
 
             if (fParkedChain &&
                 (pindexBestParked == nullptr ||
                  pindexNew->nChainWork > pindexBestParked->nChainWork)) {
                 pindexBestParked = pindexNew;
             }
 
             LogPrintf("Considered switching to better tip %s but that chain "
                       "contains a%s%s%s block.\n",
                       pindexNew->GetBlockHash().ToString(),
                       fInvalidChain ? "n invalid" : "",
                       fParkedChain ? " parked" : "",
                       fMissingData ? " missing-data" : "");
 
             CBlockIndex *pindexFailed = pindexNew;
             // Remove the entire chain from the set.
             while (pindexTest != pindexFailed) {
                 if (fInvalidChain || fParkedChain) {
                     pindexFailed->nStatus =
                         pindexFailed->nStatus.withFailedParent(fInvalidChain)
                             .withParkedParent(fParkedChain);
                 } else if (fMissingData) {
                     // If we're missing data, then add back to
                     // m_blocks_unlinked, so that if the block arrives in the
                     // future we can try adding to setBlockIndexCandidates
                     // again.
                     m_blockman.m_blocks_unlinked.insert(
                         std::make_pair(pindexFailed->pprev, pindexFailed));
                 }
                 setBlockIndexCandidates.erase(pindexFailed);
                 pindexFailed = pindexFailed->pprev;
             }
 
             if (fInvalidChain || fParkedChain) {
                 // We discovered a new chain tip that is either parked or
                 // invalid, we may want to warn.
                 CheckForkWarningConditionsOnNewFork(pindexNew);
             }
         }
 
         if (g_avalanche &&
             gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
             g_avalanche->addBlockToReconcile(pindexNew);
         }
 
         // We found a candidate that has valid ancestors. This is our guy.
         if (hasValidAncestor) {
             return pindexNew;
         }
     } while (true);
 }
 
 /**
  * Delete all entries in setBlockIndexCandidates that are worse than the current
  * tip.
  */
 void CChainState::PruneBlockIndexCandidates() {
     // Note that we can't delete the current block itself, as we may need to
     // return to it later in case a reorganization to a better block fails.
     auto it = setBlockIndexCandidates.begin();
     while (it != setBlockIndexCandidates.end() &&
            setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
         setBlockIndexCandidates.erase(it++);
     }
 
     // Either the current tip or a successor of it we're working towards is left
     // in setBlockIndexCandidates.
     assert(!setBlockIndexCandidates.empty());
 }
 
 /**
  * Try to make some progress towards making pindexMostWork the active block.
  * pblock is either nullptr or a pointer to a CBlock corresponding to
  * pindexMostWork.
  */
 bool CChainState::ActivateBestChainStep(
     const Config &config, BlockValidationState &state,
     CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
     bool &fInvalidFound, ConnectTrace &connectTrace) {
     AssertLockHeld(cs_main);
 
     const CBlockIndex *pindexOldTip = m_chain.Tip();
     const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
 
     // Disconnect active blocks which are no longer in the best chain.
     bool fBlocksDisconnected = false;
     DisconnectedBlockTransactions disconnectpool;
     while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
         if (!DisconnectTip(config.GetChainParams(), state, &disconnectpool)) {
             // This is likely a fatal error, but keep the mempool consistent,
             // just in case. Only remove from the mempool in this case.
             disconnectpool.updateMempoolForReorg(config, false, g_mempool);
 
             // If we're unable to disconnect a block during normal operation,
             // then that is a failure of our local system -- we should abort
             // rather than stay on a less work chain.
             AbortNode(state,
                       "Failed to disconnect block; see debug.log for details");
             return false;
         }
 
         fBlocksDisconnected = true;
     }
 
     // Build list of new blocks to connect.
     std::vector<CBlockIndex *> vpindexToConnect;
     bool fContinue = true;
     int nHeight = pindexFork ? pindexFork->nHeight : -1;
     while (fContinue && nHeight != pindexMostWork->nHeight) {
         // Don't iterate the entire list of potential improvements toward the
         // best tip, as we likely only need a few blocks along the way.
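         // Working in batches of at most 32 blocks also keeps each call to
         // ActivateBestChainStep short, so that ActivateBestChain can
         // periodically release cs_main.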
         int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
         vpindexToConnect.clear();
         vpindexToConnect.reserve(nTargetHeight - nHeight);
         CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
         while (pindexIter && pindexIter->nHeight != nHeight) {
             vpindexToConnect.push_back(pindexIter);
             pindexIter = pindexIter->pprev;
         }
 
         nHeight = nTargetHeight;
 
         // Connect new blocks.
         for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
             if (!ConnectTip(config, state, pindexConnect,
                             pindexConnect == pindexMostWork
                                 ? pblock
                                 : std::shared_ptr<const CBlock>(),
                             connectTrace, disconnectpool)) {
                 if (state.IsInvalid()) {
                     // The block violates a consensus rule.
                     if (state.GetResult() !=
                         BlockValidationResult::BLOCK_MUTATED) {
                         InvalidChainFound(vpindexToConnect.back());
                     }
                     state = BlockValidationState();
                     fInvalidFound = true;
                     fContinue = false;
                     break;
                 }
 
                 // A system error occurred (disk space, database error, ...).
                 // Make the mempool consistent with the current tip, just in
                 // case any observers try to use it before shutdown.
                 disconnectpool.updateMempoolForReorg(config, false, g_mempool);
                 return false;
             } else {
                 PruneBlockIndexCandidates();
                 if (!pindexOldTip ||
                     m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
                     // We're in a better position than we were. Return
                     // temporarily to release the lock.
                     fContinue = false;
                     break;
                 }
             }
         }
     }
 
     if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
         // If any blocks were disconnected, we need to update the mempool even
         // if disconnectpool is empty. The disconnectpool may also be non-empty
         // if the mempool was imported due to new validation rules being in
         // effect.
         LogPrint(BCLog::MEMPOOL, "Updating mempool due to reorganization or "
                                  "rules upgrade/downgrade\n");
         disconnectpool.updateMempoolForReorg(config, true, g_mempool);
     }
 
     g_mempool.check(pcoinsTip.get());
 
     // Callbacks/notifications for a new best chain.
     if (fInvalidFound) {
         CheckForkWarningConditionsOnNewFork(pindexMostWork);
     } else {
         CheckForkWarningConditions();
     }
 
     return true;
 }
 
 static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
     bool fNotify = false;
     bool fInitialBlockDownload = false;
     static CBlockIndex *pindexHeaderOld = nullptr;
     CBlockIndex *pindexHeader = nullptr;
     {
         LOCK(cs_main);
         pindexHeader = pindexBestHeader;
 
         if (pindexHeader != pindexHeaderOld) {
             fNotify = true;
             fInitialBlockDownload =
                 ::ChainstateActive().IsInitialBlockDownload();
             pindexHeaderOld = pindexHeader;
         }
     }
 
     // Send block tip changed notifications without cs_main
     if (fNotify) {
         uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
     }
     return fNotify;
 }
 
 static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
     AssertLockNotHeld(cs_main);
 
     if (GetMainSignals().CallbacksPending() > 10) {
         SyncWithValidationInterfaceQueue();
     }
 }
 
 /**
  * Make the best chain active, in multiple steps. The result is either failure
  * or an activated best chain. pblock is either nullptr or a pointer to a block
  * that is already loaded (to avoid loading it again from disk).
  *
  * ActivateBestChain is split into steps (see ActivateBestChainStep) so that
  * we avoid holding cs_main for an extended period of time; the length of this
  * call may be quite long during reindexing or a substantial reorg.
  */
 bool CChainState::ActivateBestChain(const Config &config,
                                     BlockValidationState &state,
                                     std::shared_ptr<const CBlock> pblock) {
     // Note that while we're often called here from ProcessNewBlock, this is
     // far from a guarantee. Code in the P2P or RPC layers will often end up
     // calling us in the middle of ProcessNewBlock - do not assume pblock is
     // set sanely for performance or correctness!
     AssertLockNotHeld(cs_main);
 
     const CChainParams &params = config.GetChainParams();
 
     // Because this function periodically releases cs_main (so that it does
     // not lock up other threads for too long during large connects, and to
     // allow e.g. the callback queue to drain), ABC maintains a fair degree
     // of expensive-to-calculate internal state. We use m_cs_chainstate to
     // enforce mutual exclusion so that only one caller may execute this
     // function at a time.
     LOCK(m_cs_chainstate);
 
     CBlockIndex *pindexMostWork = nullptr;
     CBlockIndex *pindexNewTip = nullptr;
     int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
     do {
         boost::this_thread::interruption_point();
 
         // Block until the validation queue drains. This should largely
         // never happen in normal operation; however, it may happen during
         // reindex, causing memory blowup if we run too far ahead.
         // Note that if a validationinterface callback ends up calling
         // ActivateBestChain this may lead to a deadlock! We should
         // probably have a DEBUG_LOCKORDER test for this in the future.
         LimitValidationInterfaceQueue();
 
         {
             // Lock transaction pool for at least as long as it takes for
             // connectTrace to be consumed
             LOCK2(cs_main, ::g_mempool.cs);
             CBlockIndex *starting_tip = m_chain.Tip();
             bool blocks_connected = false;
             do {
                 // We absolutely may not unlock cs_main until we've made forward
                 // progress (with the exception of shutdown due to hardware
                 // issues, low disk space, etc).
 
                 // Destructed before cs_main is unlocked
                 ConnectTrace connectTrace(g_mempool);
 
                 if (pindexMostWork == nullptr) {
                     pindexMostWork = FindMostWorkChain();
                 }
 
                 // Whether we have anything to do at all.
                 if (pindexMostWork == nullptr ||
                     pindexMostWork == m_chain.Tip()) {
                     break;
                 }
 
                 bool fInvalidFound = false;
                 std::shared_ptr<const CBlock> nullBlockPtr;
                 if (!ActivateBestChainStep(
                         config, state, pindexMostWork,
                         pblock && pblock->GetHash() ==
                                       pindexMostWork->GetBlockHash()
                             ? pblock
                             : nullBlockPtr,
                         fInvalidFound, connectTrace)) {
                     return false;
                 }
                 blocks_connected = true;
 
                 if (fInvalidFound) {
                     // Wipe cache, we may need another branch now.
                     pindexMostWork = nullptr;
                 }
 
                 pindexNewTip = m_chain.Tip();
                 for (const PerBlockConnectTrace &trace :
                      connectTrace.GetBlocksConnected()) {
                     assert(trace.pblock && trace.pindex);
                     GetMainSignals().BlockConnected(trace.pblock, trace.pindex,
                                                     trace.conflictedTxs);
                 }
             } while (!m_chain.Tip() ||
                      (starting_tip && CBlockIndexWorkComparator()(
                                           m_chain.Tip(), starting_tip)));
 
             // Check the index once we're done with the above loop, since
             // we're going to release cs_main soon. If the index is in a bad
             // state now, then it's better to know immediately rather than
             // randomly have it cause a problem in a race.
             CheckBlockIndex(params.GetConsensus());
 
             if (!blocks_connected) {
                 return true;
             }
 
             const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
             bool fInitialDownload = IsInitialBlockDownload();
 
             // Notify external listeners about the new tip.
             // Enqueue while holding cs_main to ensure that UpdatedBlockTip is
             // called in the order in which blocks are connected
             if (pindexFork != pindexNewTip) {
                 // Notify ValidationInterface subscribers
                 GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
                                                  fInitialDownload);
 
                 // Always notify the UI if a new block tip was connected
                 uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
             }
         }
         // When we reach this point, we switched to a new tip (stored in
         // pindexNewTip).
 
         if (nStopAtHeight && pindexNewTip &&
             pindexNewTip->nHeight >= nStopAtHeight) {
             StartShutdown();
         }
 
         // We check shutdown only after giving ActivateBestChainStep a chance to
         // run once so that we never shut down before connecting the genesis
         // block during LoadChainTip(). Previously this caused an assert()
         // failure during shutdown in such cases, as the UTXO DB flushing
         // checks that the best block hash is non-null.
         if (ShutdownRequested()) {
             break;
         }
     } while (pindexNewTip != pindexMostWork);
 
     // Write changes periodically to disk, after relay.
     if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
         return false;
     }
 
     return true;
 }
 
 bool ActivateBestChain(const Config &config, BlockValidationState &state,
                        std::shared_ptr<const CBlock> pblock) {
     return ::ChainstateActive().ActivateBestChain(config, state,
                                                   std::move(pblock));
 }
 
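 /**
  * Mark a block as precious: give it the most favourable sequence id so that
  * it is preferred over other tips with the same amount of work, then re-run
  * ActivateBestChain.
  */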
 bool CChainState::PreciousBlock(const Config &config,
                                 BlockValidationState &state,
                                 CBlockIndex *pindex) {
     {
         LOCK(cs_main);
         if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
             // Nothing to do, this block is not at the tip.
             return true;
         }
 
         if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
             // The chain has been extended since the last call, reset the
             // counter.
             nBlockReverseSequenceId = -1;
         }
 
         nLastPreciousChainwork = m_chain.Tip()->nChainWork;
         setBlockIndexCandidates.erase(pindex);
         pindex->nSequenceId = nBlockReverseSequenceId;
         if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
             // We can't keep reducing the counter if somebody really wants to
             // call preciousblock 2**31-1 times on the same set of tips...
             nBlockReverseSequenceId--;
         }
 
         // In case this was parked, unpark it.
         UnparkBlock(pindex);
 
         // Make sure it is added to the candidate list if appropriate.
         if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
             pindex->HaveTxsDownloaded()) {
             setBlockIndexCandidates.insert(pindex);
             PruneBlockIndexCandidates();
         }
     }
 
     return ActivateBestChain(config, state);
 }
 
 bool PreciousBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex) {
     return ::ChainstateActive().PreciousBlock(config, state, pindex);
 }
 
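 /**
  * If pindex is on the active chain, disconnect blocks back past it. In any
  * case, mark pindex (or the last disconnected block) as failed
  * (invalidate == true) or parked (invalidate == false), while keeping
  * setBlockIndexCandidates populated with the best remaining tips.
  */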
 bool CChainState::UnwindBlock(const Config &config, BlockValidationState &state,
                               CBlockIndex *pindex, bool invalidate) {
     CBlockIndex *to_mark_failed_or_parked = pindex;
     bool pindex_was_in_chain = false;
     int disconnected = 0;
     const CChainParams &chainparams = config.GetChainParams();
 
     // We do not allow ActivateBestChain() to run while UnwindBlock() is
     // running, as that could cause the tip to change while we disconnect
     // blocks. (Note for backport of Core PR16849: we acquire
     // LOCK(m_cs_chainstate) in the Park, Invalidate and FinalizeBlock functions
     // due to differences in our code)
     AssertLockHeld(m_cs_chainstate);
 
     // We'll be acquiring and releasing cs_main below, to allow the validation
     // callbacks to run. However, we should keep the block index in a
     // consistent state as we disconnect blocks -- in particular we need to
     // add equal-work blocks to setBlockIndexCandidates as we disconnect.
     // To avoid walking the block index repeatedly in search of candidates,
     // build a map once so that we can look up candidate blocks by chain
     // work as we go.
     std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
 
     {
         LOCK(cs_main);
         for (const auto &entry : m_blockman.m_block_index) {
             CBlockIndex *candidate = entry.second;
             // We don't need to put anything in our active chain into the
             // multimap, because those candidates will be found and considered
             // as we disconnect.
             // Instead, consider only non-active-chain blocks that have at
             // least as much work as where we expect the new tip to end up.
             if (!m_chain.Contains(candidate) &&
                 !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                 candidate->IsValid(BlockValidity::TRANSACTIONS) &&
                 candidate->HaveTxsDownloaded()) {
                 candidate_blocks_by_work.insert(
                     std::make_pair(candidate->nChainWork, candidate));
             }
         }
     }
 
     // Disconnect (descendants of) pindex, and mark them invalid.
     while (true) {
         if (ShutdownRequested()) {
             break;
         }
 
         // Make sure the queue of validation callbacks doesn't grow unboundedly.
         LimitValidationInterfaceQueue();
 
         LOCK(cs_main);
         // Lock for as long as disconnectpool is in scope to make sure
         // UpdateMempoolForReorg is called after DisconnectTip without unlocking
         // in between
         LOCK(::g_mempool.cs);
 
         if (!m_chain.Contains(pindex)) {
             break;
         }
 
         pindex_was_in_chain = true;
         CBlockIndex *invalid_walk_tip = m_chain.Tip();
 
         // ActivateBestChain considers blocks already in m_chain
         // unconditionally valid, so force disconnect away from it.
 
         DisconnectedBlockTransactions disconnectpool;
 
         bool ret = DisconnectTip(chainparams, state, &disconnectpool);
 
         // DisconnectTip will add transactions to disconnectpool.
         // Adjust the mempool to be consistent with the new tip, adding
         // transactions back to the mempool if disconnecting was successful,
         // and we're not doing a very deep invalidation (in which case
         // keeping the mempool up to date is probably futile anyway).
         disconnectpool.updateMempoolForReorg(
             config, /* fAddToMempool = */ (++disconnected <= 10) && ret,
             ::g_mempool);
 
         if (!ret) {
             return false;
         }
 
         assert(invalid_walk_tip->pprev == m_chain.Tip());
 
         // We immediately mark the disconnected blocks as invalid.
         // This prevents a case where pruned nodes may fail to invalidateblock
         // and be left unable to start as they have no tip candidates (as there
         // are no blocks that meet the "have data and are not invalid per
         // nStatus" criteria for inclusion in setBlockIndexCandidates).
 
         invalid_walk_tip->nStatus =
             invalidate ? invalid_walk_tip->nStatus.withFailed()
                        : invalid_walk_tip->nStatus.withParked();
 
         setDirtyBlockIndex.insert(invalid_walk_tip);
         setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
 
         if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
             (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
                         : to_mark_failed_or_parked->nStatus.isParked())) {
             // We only want to mark the last disconnected block as
             // Failed (or Parked); its children need to be FailedParent (or
             // ParkedParent) instead.
             to_mark_failed_or_parked->nStatus =
                 (invalidate
                      ? to_mark_failed_or_parked->nStatus.withFailed(false)
                            .withFailedParent()
                      : to_mark_failed_or_parked->nStatus.withParked(false)
                            .withParkedParent());
 
             setDirtyBlockIndex.insert(to_mark_failed_or_parked);
         }
 
         // Add any equal or more work headers to setBlockIndexCandidates
         auto candidate_it = candidate_blocks_by_work.lower_bound(
             invalid_walk_tip->pprev->nChainWork);
         while (candidate_it != candidate_blocks_by_work.end()) {
             if (!CBlockIndexWorkComparator()(candidate_it->second,
                                              invalid_walk_tip->pprev)) {
                 setBlockIndexCandidates.insert(candidate_it->second);
                 candidate_it = candidate_blocks_by_work.erase(candidate_it);
             } else {
                 ++candidate_it;
             }
         }
 
         // Track the last disconnected block, so we can correct its
         // FailedParent (or ParkedParent) status in future iterations, or, if
         // it's the last one, call InvalidChainFound on it.
         to_mark_failed_or_parked = invalid_walk_tip;
     }
 
     CheckBlockIndex(chainparams.GetConsensus());
 
     {
         LOCK(cs_main);
         if (m_chain.Contains(to_mark_failed_or_parked)) {
             // If the to-be-marked invalid block is in the active chain,
             // something is interfering and we can't proceed.
             return false;
         }
 
         // Mark pindex (or the last disconnected block) as invalid (or parked),
         // even when it never was in the main chain.
         to_mark_failed_or_parked->nStatus =
             invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
                        : to_mark_failed_or_parked->nStatus.withParked();
         setDirtyBlockIndex.insert(to_mark_failed_or_parked);
         if (invalidate) {
             m_blockman.m_failed_blocks.insert(to_mark_failed_or_parked);
         }
 
         // If any new blocks somehow arrived while we were disconnecting
         // (above), then the pre-calculation of what should go into
         // setBlockIndexCandidates may have missed entries. This would
         // technically be an inconsistency in the block index, but if we clean
         // it up here, this should be an essentially unobservable error.
         // Loop back over all block index entries and add any missing entries
         // to setBlockIndexCandidates.
         for (const std::pair<const BlockHash, CBlockIndex *> &it :
              m_blockman.m_block_index) {
             CBlockIndex *i = it.second;
             if (i->IsValid(BlockValidity::TRANSACTIONS) &&
                 i->HaveTxsDownloaded() &&
                 !setBlockIndexCandidates.value_comp()(i, m_chain.Tip())) {
                 setBlockIndexCandidates.insert(i);
             }
         }
 
         if (invalidate) {
             InvalidChainFound(to_mark_failed_or_parked);
         }
     }
 
     // Only notify about a new block tip if the active chain was modified.
     if (pindex_was_in_chain) {
         uiInterface.NotifyBlockTip(IsInitialBlockDownload(),
                                    to_mark_failed_or_parked->pprev);
     }
     return true;
 }
 
 bool CChainState::InvalidateBlock(const Config &config,
                                   BlockValidationState &state,
                                   CBlockIndex *pindex) {
     AssertLockNotHeld(m_cs_chainstate);
     // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
     LOCK(m_cs_chainstate);
 
     return UnwindBlock(config, state, pindex, true);
 }
 
 bool CChainState::ParkBlock(const Config &config, BlockValidationState &state,
                             CBlockIndex *pindex) {
     AssertLockNotHeld(m_cs_chainstate);
     // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
     LOCK(m_cs_chainstate);
 
     return UnwindBlock(config, state, pindex, false);
 }
 
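 /**
  * Mark pindex as the finalized block and unpark it if needed. If the
  * finalized block is not on the active chain, invalidate the active-chain
  * block immediately after the fork point so that we reorg onto the finalized
  * branch.
  */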
 bool CChainState::FinalizeBlock(const Config &config,
                                 BlockValidationState &state,
                                 CBlockIndex *pindex) {
     AssertLockNotHeld(m_cs_chainstate);
     // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
     LOCK(m_cs_chainstate);
 
     AssertLockNotHeld(cs_main);
     CBlockIndex *pindexToInvalidate = nullptr;
     {
         LOCK(cs_main);
         if (!MarkBlockAsFinal(config, state, pindex)) {
             // state is set by MarkBlockAsFinal.
             return false;
         }
 
         // We have a valid candidate, make sure it is not parked.
         if (pindex->nStatus.isOnParkedChain()) {
             UnparkBlock(pindex);
         }
 
         // If the finalized block is on the active chain, there is no need to
         // rewind.
         if (::ChainActive().Contains(pindex)) {
             return true;
         }
 
         // If the finalized block is not on the active chain, that chain is
         // invalid
         // ...
         const CBlockIndex *pindexFork = ::ChainActive().FindFork(pindex);
         pindexToInvalidate = ::ChainActive().Next(pindexFork);
         if (!pindexToInvalidate) {
             return false;
         }
     } // end of locked cs_main scope
 
     // ... therefore, we invalidate the block on the active chain that comes
     // immediately after it
     return UnwindBlock(config, state, pindexToInvalidate,
                        true /* invalidating */);
 }
 
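 /**
  * Apply f to pindex->nStatus, committing the result only if the status
  * actually changes and pindex descends from pindexBase (or pindexBase is
  * null). Returns true if the status was updated.
  */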
 template <typename F>
 bool CChainState::UpdateFlagsForBlock(CBlockIndex *pindexBase,
                                       CBlockIndex *pindex, F f) {
     BlockStatus newStatus = f(pindex->nStatus);
     if (pindex->nStatus != newStatus &&
         (!pindexBase ||
          pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
         pindex->nStatus = newStatus;
         setDirtyBlockIndex.insert(pindex);
         if (newStatus.isValid()) {
             m_blockman.m_failed_blocks.erase(pindex);
         }
 
         if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
             pindex->HaveTxsDownloaded() &&
             setBlockIndexCandidates.value_comp()(::ChainActive().Tip(),
                                                  pindex)) {
             setBlockIndexCandidates.insert(pindex);
         }
         return true;
     }
     return false;
 }
 
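 /**
  * Three-pass flag update: f is applied to pindex and each of its ancestors,
  * fChild to every block descending from pindex, and fAncestorWasChanged to
  * every block descending from the deepest modified ancestor. pindexReset is
  * cleared if one of its ancestors was modified.
  */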
 template <typename F, typename C, typename AC>
 void CChainState::UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset,
                               F f, C fChild, AC fAncestorWasChanged) {
     AssertLockHeld(cs_main);
 
     // Update the current block and ancestors; while we're doing this, identify
     // which was the deepest ancestor we changed.
     CBlockIndex *pindexDeepestChanged = pindex;
     for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
          pindexAncestor = pindexAncestor->pprev) {
         if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
             pindexDeepestChanged = pindexAncestor;
         }
     }
 
     if (pindexReset &&
         pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
             pindexDeepestChanged) {
         // reset pindexReset if it had a modified ancestor.
         pindexReset = nullptr;
     }
 
     // Update all blocks under modified blocks.
     BlockMap::iterator it = m_blockman.m_block_index.begin();
     while (it != m_blockman.m_block_index.end()) {
         UpdateFlagsForBlock(pindex, it->second, fChild);
         UpdateFlagsForBlock(pindexDeepestChanged, it->second,
                             fAncestorWasChanged);
         it++;
     }
 }
 
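 /**
  * Clear the failure flags from pindex, its ancestors and its descendants so
  * that the block may be reconsidered, and pull the finalization point back to
  * the last common ancestor of pindex and the previous finalization point.
  */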
 void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
 
     // In case we are reconsidering something before the finalization point,
     // move the finalization point to the last common ancestor.
     if (m_finalizedBlockIndex) {
         m_finalizedBlockIndex =
             LastCommonAncestor(pindex, m_finalizedBlockIndex);
     }
 
     UpdateFlags(
         pindex, pindexBestInvalid,
         [](const BlockStatus status) {
             return status.withClearedFailureFlags();
         },
         [](const BlockStatus status) {
             return status.withClearedFailureFlags();
         },
         [](const BlockStatus status) {
             return status.withFailedParent(false);
         });
 }
 
 void ResetBlockFailureFlags(CBlockIndex *pindex) {
     return ::ChainstateActive().ResetBlockFailureFlags(pindex);
 }
 
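 /**
  * Clear the parked flags on pindex and its ancestors. Descendants are fully
  * unparked only when fClearChildren is set; otherwise they merely lose their
  * ParkedParent flag.
  */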
 void CChainState::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
     AssertLockHeld(cs_main);
 
     UpdateFlags(
         pindex, pindexBestParked,
         [](const BlockStatus status) {
             return status.withClearedParkedFlags();
         },
         [fClearChildren](const BlockStatus status) {
             return fClearChildren ? status.withClearedParkedFlags()
                                   : status.withParkedParent(false);
         },
         [](const BlockStatus status) {
             return status.withParkedParent(false);
         });
 }
 
 void UnparkBlockAndChildren(CBlockIndex *pindex) {
     return ::ChainstateActive().UnparkBlockImpl(pindex, true);
 }
 
 void UnparkBlock(CBlockIndex *pindex) {
     return ::ChainstateActive().UnparkBlockImpl(pindex, false);
 }
 
 bool CChainState::IsBlockFinalized(const CBlockIndex *pindex) const {
     AssertLockHeld(cs_main);
     return m_finalizedBlockIndex &&
            m_finalizedBlockIndex->GetAncestor(pindex->nHeight) == pindex;
 }
 
 /** Return the currently finalized block index. */
 const CBlockIndex *CChainState::GetFinalizedBlock() const {
     AssertLockHeld(cs_main);
     return m_finalizedBlockIndex;
 }
 
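 /**
  * Create a block index entry for the given header, or return the existing
  * one. The new entry is linked to its parent when known, and pindexBestHeader
  * is updated if the entry has more accumulated work.
  */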
 CBlockIndex *BlockManager::AddToBlockIndex(const CBlockHeader &block) {
     AssertLockHeld(cs_main);
 
     // Check for duplicate
     BlockHash hash = block.GetHash();
     BlockMap::iterator it = m_block_index.find(hash);
     if (it != m_block_index.end()) {
         return it->second;
     }
 
     // Construct new block index object
     CBlockIndex *pindexNew = new CBlockIndex(block);
     // We assign the sequence id to blocks only when the full data is
     // available, so that miners cannot gain a competitive advantage by
     // withholding a block's data while broadcasting its header.
     pindexNew->nSequenceId = 0;
     BlockMap::iterator mi =
         m_block_index.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
     BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
     if (miPrev != m_block_index.end()) {
         pindexNew->pprev = (*miPrev).second;
         pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
         pindexNew->BuildSkip();
     }
     pindexNew->nTimeReceived = GetTime();
     pindexNew->nTimeMax =
         (pindexNew->pprev
              ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
              : pindexNew->nTime);
     pindexNew->nChainWork =
         (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
         GetBlockProof(*pindexNew);
     pindexNew->RaiseValidity(BlockValidity::TREE);
     if (pindexBestHeader == nullptr ||
         pindexBestHeader->nChainWork < pindexNew->nChainWork) {
         pindexBestHeader = pindexNew;
     }
 
     setDirtyBlockIndex.insert(pindexNew);
     return pindexNew;
 }
 
 /**
  * Mark a block as having its data received and checked (up to
  * BLOCK_VALID_TRANSACTIONS).
  */
 void CChainState::ReceivedBlockTransactions(const CBlock &block,
                                             CBlockIndex *pindexNew,
                                             const FlatFilePos &pos) {
     pindexNew->nTx = block.vtx.size();
     pindexNew->nFile = pos.nFile;
     pindexNew->nDataPos = pos.nPos;
     pindexNew->nUndoPos = 0;
     pindexNew->nStatus = pindexNew->nStatus.withData();
     pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS);
     setDirtyBlockIndex.insert(pindexNew);
 
     if (pindexNew->UpdateChainStats()) {
         // If pindexNew is the genesis block or all parents are
         // BLOCK_VALID_TRANSACTIONS.
         std::deque<CBlockIndex *> queue;
         queue.push_back(pindexNew);
 
         // Recursively process any descendant blocks that now may be eligible to
         // be connected.
         while (!queue.empty()) {
             CBlockIndex *pindex = queue.front();
             queue.pop_front();
             pindex->UpdateChainStats();
             if (pindex->nSequenceId == 0) {
                 // We assign a sequence id when transactions are received to
                 // prevent a miner from being able to broadcast a block but not
                 // its content. However, a sequence id may have been set
                 // manually, for instance via PreciousBlock, in which case we
                 // don't need to assign one.
                 pindex->nSequenceId = nBlockSequenceId++;
             }
 
             if (m_chain.Tip() == nullptr ||
                 !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                 setBlockIndexCandidates.insert(pindex);
             }
 
             std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                       std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                 range = m_blockman.m_blocks_unlinked.equal_range(pindex);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
                     range.first;
                 queue.push_back(it->second);
                 range.first++;
                 m_blockman.m_blocks_unlinked.erase(it);
             }
         }
     } else if (pindexNew->pprev &&
                pindexNew->pprev->IsValid(BlockValidity::TREE)) {
         m_blockman.m_blocks_unlinked.insert(
             std::make_pair(pindexNew->pprev, pindexNew));
     }
 }
 
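 /**
  * Find a disk position at which to append nAddSize bytes of block data,
  * rolling over to a new block file when the current one would exceed
  * MAX_BLOCKFILE_SIZE. When fKnown is set, the block already resides on disk
  * at pos and only the file metadata is updated.
  */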
 static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
                          unsigned int nHeight, uint64_t nTime,
                          bool fKnown = false) {
     LOCK(cs_LastBlockFile);
 
     unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
     if (vinfoBlockFile.size() <= nFile) {
         vinfoBlockFile.resize(nFile + 1);
     }
 
     if (!fKnown) {
         while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
             nFile++;
             if (vinfoBlockFile.size() <= nFile) {
                 vinfoBlockFile.resize(nFile + 1);
             }
         }
         pos.nFile = nFile;
         pos.nPos = vinfoBlockFile[nFile].nSize;
     }
 
     if ((int)nFile != nLastBlockFile) {
         if (!fKnown) {
             LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
                       vinfoBlockFile[nLastBlockFile].ToString());
         }
         FlushBlockFile(!fKnown);
         nLastBlockFile = nFile;
     }
 
     vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
     if (fKnown) {
         vinfoBlockFile[nFile].nSize =
             std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
     } else {
         vinfoBlockFile[nFile].nSize += nAddSize;
     }
 
     if (!fKnown) {
         bool out_of_space;
         size_t bytes_allocated =
             BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
         if (out_of_space) {
             return AbortNode("Disk space is too low!",
                              _("Error: Disk space is too low!").translated,
                              CClientUIInterface::MSG_NOPREFIX);
         }
         if (bytes_allocated != 0 && fPruneMode) {
             fCheckForPruning = true;
         }
     }
 
     setDirtyFileInfo.insert(nFile);
     return true;
 }
 
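 /**
  * Reserve nAddSize bytes in the undo file that corresponds to block file
  * nFile and return the write position in pos. Aborts the node if the disk is
  * nearly full.
  */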
 static bool FindUndoPos(BlockValidationState &state, int nFile,
                         FlatFilePos &pos, unsigned int nAddSize) {
     pos.nFile = nFile;
 
     LOCK(cs_LastBlockFile);
 
     pos.nPos = vinfoBlockFile[nFile].nUndoSize;
     vinfoBlockFile[nFile].nUndoSize += nAddSize;
     setDirtyFileInfo.insert(nFile);
 
     bool out_of_space;
     size_t bytes_allocated =
         UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
     if (out_of_space) {
         return AbortNode(state, "Disk space is too low!",
                          _("Error: Disk space is too low!").translated,
                          CClientUIInterface::MSG_NOPREFIX);
     }
     if (bytes_allocated != 0 && fPruneMode) {
         fCheckForPruning = true;
     }
 
     return true;
 }
 
 /**
  * Return true if the provided block header is valid.
  * Only verify PoW if blockValidationOptions is configured to do so.
  * This allows validation of headers on which the PoW hasn't been done.
  * For example: to validate a block template handed to mining software.
  * Do not call this for any check that depends on the context.
  * For context-dependent calls, see ContextualCheckBlockHeader.
  */
 static bool CheckBlockHeader(const CBlockHeader &block,
                              BlockValidationState &state,
                              const Consensus::Params &params,
                              BlockValidationOptions validationOptions) {
     // Check proof of work matches claimed amount
     if (validationOptions.shouldValidatePoW() &&
         !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                              REJECT_INVALID, "high-hash",
                              "proof of work failed");
     }
 
     return true;
 }
 
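 /**
  * Context-free block checks: header (PoW), merkle root and malleability,
  * presence of a coinbase, size limits and per-transaction sanity. Sets
  * block.fChecked when both PoW and merkle root were validated so the work is
  * not repeated.
  */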
 bool CheckBlock(const CBlock &block, BlockValidationState &state,
                 const Consensus::Params &params,
                 BlockValidationOptions validationOptions) {
     // These are checks that are independent of context.
     if (block.fChecked) {
         return true;
     }
 
     // Check that the header is valid (particularly PoW).  This is mostly
     // redundant with the call in AcceptBlockHeader.
     if (!CheckBlockHeader(block, state, params, validationOptions)) {
         return false;
     }
 
     // Check the merkle root.
     if (validationOptions.shouldValidateMerkleRoot()) {
         bool mutated;
         uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
         if (block.hashMerkleRoot != hashMerkleRoot2) {
             return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
                                  REJECT_INVALID, "bad-txnmrklroot",
                                  "hashMerkleRoot mismatch");
         }
 
         // Check for merkle tree malleability (CVE-2012-2459): repeating
         // sequences of transactions in a block without affecting the merkle
         // root of a block, while still invalidating it.
         if (mutated) {
             return state.Invalid(BlockValidationResult::BLOCK_MUTATED,
                                  REJECT_INVALID, "bad-txns-duplicate",
                                  "duplicate transaction");
         }
     }
 
     // All potential-corruption validation must be done before we do any
     // transaction validation, as otherwise we may mark the header as invalid
     // because we receive the wrong transactions for it.
 
     // First transaction must be coinbase.
     if (block.vtx.empty()) {
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "bad-cb-missing",
                              "first tx is not coinbase");
     }
 
     // Size limits.
     auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
 
     // Bail early if there is no way this block is of reasonable size.
     if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "bad-blk-length",
                              "size limits failed");
     }
 
     auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
     if (currentBlockSize > nMaxBlockSize) {
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              REJECT_INVALID, "bad-blk-length",
                              "size limits failed");
     }
 
     // And a valid coinbase.
     TxValidationState tx_state;
     if (!CheckCoinbase(*block.vtx[0], tx_state)) {
         return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                              tx_state.GetRejectCode(),
                              tx_state.GetRejectReason(),
                              strprintf("Coinbase check failed (txid %s) %s",
                                        block.vtx[0]->GetId().ToString(),
                                        tx_state.GetDebugMessage()));
     }
 
     // Check transactions for regularity, skipping the first. Note that this
     // is the first time we check that all after the first are !IsCoinBase.
     for (size_t i = 1; i < block.vtx.size(); i++) {
         auto *tx = block.vtx[i].get();
         if (!CheckRegularTransaction(*tx, tx_state)) {
             return state.Invalid(
                 BlockValidationResult::BLOCK_CONSENSUS,
                 tx_state.GetRejectCode(), tx_state.GetRejectReason(),
                 strprintf("Transaction check failed (txid %s) %s",
                           tx->GetId().ToString(), tx_state.GetDebugMessage()));
         }
     }
 
     if (validationOptions.shouldValidatePoW() &&
         validationOptions.shouldValidateMerkleRoot()) {
         block.fChecked = true;
     }
 
     return true;
 }
 
 /**
  * Context-dependent validity checks.
  * By "context", we mean only the previous block headers, but not the UTXO
  * set; UTXO-related validity checks are done in ConnectBlock().
  * NOTE: This function is not currently invoked by ConnectBlock(), so we
  * should consider upgrade issues if we change which consensus rules are
  * enforced in this function (eg by adding a new consensus rule). See comment
  * in ConnectBlock().
  * Note that -reindex-chainstate skips the validation that happens here!
  */
 static bool ContextualCheckBlockHeader(const CChainParams &params,
                                        const CBlockHeader &block,
                                        BlockValidationState &state,
                                        const CBlockIndex *pindexPrev,
                                        int64_t nAdjustedTime)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     assert(pindexPrev != nullptr);
     const int nHeight = pindexPrev->nHeight + 1;
 
     // Check proof of work
     if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
         LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                              REJECT_INVALID, "bad-diffbits",
                              "incorrect proof of work");
     }
 
     // Check against checkpoints
     if (fCheckpointsEnabled) {
         const CCheckpointData &checkpoints = params.Checkpoints();
 
         // Check that the block chain matches the known block chain up to a
         // checkpoint.
         if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
             LogPrintf("ERROR: %s: rejected by checkpoint lock-in at %d\n",
                       __func__, nHeight);
             return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
                                  REJECT_CHECKPOINT, "checkpoint mismatch");
         }
 
         // Don't accept any forks from the main chain prior to the last
         // checkpoint. GetLastCheckpoint finds the last checkpoint in
         // MapCheckpoints that's in our g_blockman.m_block_index.
         CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints);
         if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
             LogPrintf("ERROR: %s: forked chain older than last checkpoint "
                       "(height %d)\n",
                       __func__, nHeight);
             return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT,
                                  REJECT_CHECKPOINT,
                                  "bad-fork-prior-to-checkpoint");
         }
     }
 
     // Check timestamp against prev
     if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
         return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER,
                              REJECT_INVALID, "time-too-old",
                              "block's timestamp is too early");
     }
 
     // Check timestamp
     if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
         return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE,
                              REJECT_INVALID, "time-too-new",
                              "block timestamp too far in the future");
     }
 
     // Reject outdated version blocks when 95% (75% on testnet) of the network
     // has upgraded:
     // check for version 2, 3 and 4 upgrades
     const Consensus::Params &consensusParams = params.GetConsensus();
     if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
         (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
         (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
         return state.Invalid(
             BlockValidationResult::BLOCK_INVALID_HEADER, REJECT_OBSOLETE,
             strprintf("bad-version(0x%08x)", block.nVersion),
             strprintf("rejected nVersion=0x%08x block", block.nVersion));
     }
 
     return true;
 }
 
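 /**
  * Check a transaction against the contextual rules that would apply to the
  * next block, deriving the evaluation height and lock-time cutoff from the
  * current chain tip.
  */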
 bool ContextualCheckTransactionForCurrentBlock(const Consensus::Params &params,
                                                const CTransaction &tx,
                                                TxValidationState &state,
                                                int flags) {
     AssertLockHeld(cs_main);
 
     // By convention a negative value for flags indicates that the current
     // network-enforced consensus rules should be used. In a future soft-fork
     // scenario that would mean checking which rules would be enforced for the
     // next block and setting the appropriate flags. At the present time no
     // soft-forks are scheduled, so no flags are set.
     flags = std::max(flags, 0);
 
     // ContextualCheckTransactionForCurrentBlock() uses
     // ::ChainActive().Height()+1 to evaluate nLockTime because when IsFinalTx()
     // is called within CBlock::AcceptBlock(), the height of the block *being*
     // evaluated is what is used. Thus if we want to know if a transaction can
     // be part of the *next* block, we need to call ContextualCheckTransaction()
     // with one more than ::ChainActive().Height().
     const int nBlockHeight = ::ChainActive().Height() + 1;
 
     // BIP113 will require that time-locked transactions have nLockTime set to
     // less than the median time of the previous block they're contained in.
     // When the next block is created its previous block will be the current
     // chain tip, so we use that to calculate the median time passed to
     // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
     const int64_t nMedianTimePast =
         ::ChainActive().Tip() == nullptr
             ? 0
             : ::ChainActive().Tip()->GetMedianTimePast();
     const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                                         ? nMedianTimePast
                                         : GetAdjustedTime();
 
     return ContextualCheckTransaction(params, tx, state, nBlockHeight,
                                       nLockTimeCutoff, nMedianTimePast);
 }
 
 /**
  * NOTE: This function is not currently invoked by ConnectBlock(), so we
  * should consider upgrade issues if we change which consensus rules are
  * enforced in this function (eg by adding a new consensus rule). See comment
  * in ConnectBlock().
  * Note that -reindex-chainstate skips the validation that happens here!
  */
 static bool ContextualCheckBlock(const CBlock &block,
                                  BlockValidationState &state,
                                  const Consensus::Params &params,
                                  const CBlockIndex *pindexPrev) {
     const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
 
     // Start enforcing BIP113 (Median Time Past).
     int nLockTimeFlags = 0;
     if (nHeight >= params.CSVHeight) {
         assert(pindexPrev != nullptr);
         nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
     }
 
     const int64_t nMedianTimePast =
         pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
 
     const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                                         ? nMedianTimePast
                                         : block.GetBlockTime();
 
     const bool fIsMagneticAnomalyEnabled =
         IsMagneticAnomalyEnabled(params, pindexPrev);
 
     // Check transactions:
     // - canonical ordering
     // - ensure they are finalized
     // - perform a preliminary block-sigops count (they will be recounted more
     // strictly during ConnectBlock).
     // - perform a transaction-sigops check (again, a more strict check will
     // happen in ConnectBlock).
     const CTransaction *prevTx = nullptr;
     for (const auto &ptx : block.vtx) {
         const CTransaction &tx = *ptx;
         if (fIsMagneticAnomalyEnabled) {
             if (prevTx && (tx.GetId() <= prevTx->GetId())) {
                 if (tx.GetId() == prevTx->GetId()) {
                     return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                          REJECT_INVALID, "tx-duplicate",
                                          strprintf("Duplicated transaction %s",
                                                    tx.GetId().ToString()));
                 }
 
                 return state.Invalid(
                     BlockValidationResult::BLOCK_CONSENSUS, REJECT_INVALID,
                     "tx-ordering",
                     strprintf("Transaction order is invalid (%s < %s)",
                               tx.GetId().ToString(),
                               prevTx->GetId().ToString()));
             }
 
             if (prevTx || !tx.IsCoinBase()) {
                 prevTx = &tx;
             }
         }
 
         TxValidationState tx_state;
         if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
                                         nLockTimeCutoff, nMedianTimePast)) {
             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                  REJECT_INVALID, tx_state.GetRejectReason(),
                                  tx_state.GetDebugMessage());
         }
     }
 
     // Enforce rule that the coinbase starts with serialized block height
     if (nHeight >= params.BIP34Height) {
         CScript expect = CScript() << nHeight;
         if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
             !std::equal(expect.begin(), expect.end(),
                         block.vtx[0]->vin[0].scriptSig.begin())) {
             return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                                  REJECT_INVALID, "bad-cb-height",
                                  "block height mismatch in coinbase");
         }
     }
 
     return true;
 }
 
 /**
  * If the provided block header is valid, add it to the block index.
  *
  * Returns true if the block is successfully added to the block index.
  */
 bool BlockManager::AcceptBlockHeader(const Config &config,
                                      const CBlockHeader &block,
                                      BlockValidationState &state,
                                      CBlockIndex **ppindex) {
     AssertLockHeld(cs_main);
     const CChainParams &chainparams = config.GetChainParams();
 
     // Check for duplicate
     BlockHash hash = block.GetHash();
     BlockMap::iterator miSelf = m_block_index.find(hash);
     CBlockIndex *pindex = nullptr;
     if (hash != chainparams.GetConsensus().hashGenesisBlock) {
         if (miSelf != m_block_index.end()) {
             // Block header is already known.
             pindex = miSelf->second;
             if (ppindex) {
                 *ppindex = pindex;
             }
 
             if (pindex->nStatus.isInvalid()) {
                 LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__,
                           hash.ToString());
                 return state.Invalid(
                     BlockValidationResult::BLOCK_CACHED_INVALID, 0,
                     "duplicate");
             }
 
             return true;
         }
 
         if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
                               BlockValidationOptions(config))) {
             return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__,
                          hash.ToString(), FormatStateMessage(state));
         }
 
         // Get prev block index
         BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
         if (mi == m_block_index.end()) {
             LogPrintf("ERROR: %s: prev block not found\n", __func__);
             return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, 0,
                                  "prev-blk-not-found");
         }
 
         CBlockIndex *pindexPrev = (*mi).second;
         assert(pindexPrev);
         if (pindexPrev->nStatus.isInvalid()) {
             LogPrintf("ERROR: %s: prev block invalid\n", __func__);
             return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV,
                                  REJECT_INVALID, "bad-prevblk");
         }
 
         if (!ContextualCheckBlockHeader(chainparams, block, state, pindexPrev,
                                         GetAdjustedTime())) {
             return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
                          __func__, hash.ToString(), FormatStateMessage(state));
         }
 
         /* Determine if this block descends from any block which has been found
          * invalid (m_failed_blocks), then mark pindexPrev and any blocks
          * between them as failed. For example:
          *
          *                D3
          *              /
          *      B2 - C2
          *    /         \
          *  A             D2 - E2 - F2
          *    \
          *      B1 - C1 - D1 - E1
          *
          * In the case that we attempted to reorg from E1 to F2, only to find
          * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
          * but NOT D3 (it was not in any of our candidate sets at the time).
          *
          * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
          * in LoadBlockIndex.
          */
         if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
             // The above does not mean "invalid": it checks if the previous
             // block hasn't been validated up to BlockValidity::SCRIPTS. This is
             // a performance optimization: in the common case of adding a new
             // block to the tip, we don't need to iterate over the failed blocks
             // list.
             for (const CBlockIndex *failedit : m_failed_blocks) {
                 if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
                     assert(failedit->nStatus.hasFailed());
                     CBlockIndex *invalid_walk = pindexPrev;
                     while (invalid_walk != failedit) {
                         invalid_walk->nStatus =
                             invalid_walk->nStatus.withFailedParent();
                         setDirtyBlockIndex.insert(invalid_walk);
                         invalid_walk = invalid_walk->pprev;
                     }
                     LogPrintf("ERROR: %s: prev block invalid\n", __func__);
                     return state.Invalid(
                         BlockValidationResult::BLOCK_INVALID_PREV,
                         REJECT_INVALID, "bad-prevblk");
                 }
             }
         }
     }
 
     if (pindex == nullptr) {
         pindex = AddToBlockIndex(block);
     }
 
     if (ppindex) {
         *ppindex = pindex;
     }
 
     return true;
 }
 
 // Exposed wrapper for AcceptBlockHeader
 bool ProcessNewBlockHeaders(const Config &config,
                             const std::vector<CBlockHeader> &headers,
                             BlockValidationState &state,
                             const CBlockIndex **ppindex) {
     {
         LOCK(cs_main);
         for (const CBlockHeader &header : headers) {
             // Use a temp pindex instead of ppindex to avoid a const_cast
             CBlockIndex *pindex = nullptr;
             bool accepted =
                 g_blockman.AcceptBlockHeader(config, header, state, &pindex);
             ::ChainstateActive().CheckBlockIndex(
                 config.GetChainParams().GetConsensus());
 
             if (!accepted) {
                 return false;
             }
 
             if (ppindex) {
                 *ppindex = pindex;
             }
         }
     }
 
     if (NotifyHeaderTip()) {
         if (::ChainstateActive().IsInitialBlockDownload() && ppindex &&
             *ppindex) {
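             // Rough progress estimate: headers synced so far divided by the
             // expected total, where the number of missing headers is
             // estimated from the remaining time span at the target block
             // spacing.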
             LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
                       (*ppindex)->nHeight,
                       100.0 /
                           ((*ppindex)->nHeight +
                            (GetAdjustedTime() - (*ppindex)->GetBlockTime()) /
                                Params().GetConsensus().nPowTargetSpacing) *
                           (*ppindex)->nHeight);
         }
     }
     return true;
 }
 
 /**
  * Store block on disk. If dbp is non-nullptr, the file is known to already
  * reside on disk.
  */
 static FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight,
                                    const CChainParams &chainparams,
                                    const FlatFilePos *dbp) {
     unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
     FlatFilePos blockPos;
     if (dbp != nullptr) {
         blockPos = *dbp;
     }
     if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(),
                       dbp != nullptr)) {
         error("%s: FindBlockPos failed", __func__);
         return FlatFilePos();
     }
     if (dbp == nullptr) {
         if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
             AbortNode("Failed to write block");
             return FlatFilePos();
         }
     }
     return blockPos;
 }
 
 /**
  * Store a block on disk.
  *
  * @param[in]     config     The global config.
  * @param[in,out] pblock     The block we want to accept.
  * @param[in]     fRequested A boolean to indicate if this block was requested
  *                           from our peers.
  * @param[in]     dbp        If non-null, the disk position of the block.
  * @param[in,out] fNewBlock  True if block was first received via this call.
  * @return True if the block is accepted as a valid block and written to disk.
  */
 bool CChainState::AcceptBlock(const Config &config,
                               const std::shared_ptr<const CBlock> &pblock,
                               BlockValidationState &state, bool fRequested,
                               const FlatFilePos *dbp, bool *fNewBlock) {
     AssertLockHeld(cs_main);
 
     const CBlock &block = *pblock;
     if (fNewBlock) {
         *fNewBlock = false;
     }
 
     CBlockIndex *pindex = nullptr;
 
     bool accepted_header =
         m_blockman.AcceptBlockHeader(config, block, state, &pindex);
     CheckBlockIndex(config.GetChainParams().GetConsensus());
 
     if (!accepted_header) {
         return false;
     }
 
     // Try to process all requested blocks that we don't have, but only
     // process an unrequested block if it's new and has enough work to
     // advance our tip, and isn't too many blocks ahead.
     bool fAlreadyHave = pindex->nStatus.hasData();
 
     // TODO: deal better with return value and error conditions for duplicate
     // and unrequested blocks.
     if (fAlreadyHave) {
         return true;
     }
 
     // Compare block header timestamps and received times of the block and the
     // chaintip.  If they have the same chain height, use these diffs as a
     // tie-breaker, attempting to pick the more honestly-mined block.
     int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
     int64_t chainTipTimeDiff =
         m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
 
     bool isSameHeight =
         m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
     if (isSameHeight) {
         LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
                   "diff=%d\n",
                   m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
         LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
                   "diff=%d\n",
                   pindex->GetBlockHash().ToString(), newBlockTimeDiff);
     }
 
     bool fHasMoreOrSameWork =
         (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
                        : true);
 
     // Blocks that are too out-of-order needlessly limit the effectiveness of
     // pruning, because pruning will not delete block files that contain any
     // blocks which are too close in height to the tip.  Apply this test
     // regardless of whether pruning is enabled; it should generally be safe to
     // not process unrequested blocks.
     bool fTooFarAhead =
         (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
 
     // TODO: Decouple this function from the block download logic by removing
     // fRequested
     // This requires some new chain data structure to efficiently look up if a
     // block is in a chain leading to a candidate for best tip, despite not
     // being such a candidate itself.
 
     // If we didn't ask for it:
     if (!fRequested) {
         // This is a previously-processed block that was pruned.
         if (pindex->nTx != 0) {
             return true;
         }
 
         // Don't process less-work chains.
         if (!fHasMoreOrSameWork) {
             return true;
         }
 
         // Block height is too high.
         if (fTooFarAhead) {
             return true;
         }
 
         // Protect against DoS attacks from low-work chains.
         // If our tip is behind, a peer could try to send us
         // low-work blocks on a fake chain that we would never
         // request; don't process these.
         if (pindex->nChainWork < nMinimumChainWork) {
             return true;
         }
     }
 
     const CChainParams &chainparams = config.GetChainParams();
     const Consensus::Params &consensusParams = chainparams.GetConsensus();
 
     if (!CheckBlock(block, state, consensusParams,
                     BlockValidationOptions(config)) ||
         !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
         if (state.IsInvalid() &&
             state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
             pindex->nStatus = pindex->nStatus.withFailed();
             setDirtyBlockIndex.insert(pindex);
         }
 
         return error("%s: %s (block %s)", __func__, FormatStateMessage(state),
                      block.GetHash().ToString());
     }
 
     // If connecting the new block would require rewinding more than one block
     // from the active chain (i.e., a "deep reorg"), then mark the new block as
     // parked. If it has enough work then it will be automatically unparked
     // later, during FindMostWorkChain. We mark the block as parked at the very
     // last minute so we can make sure everything is ready to be reorged if
     // needed.
     if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
         const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
         if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
             LogPrintf("Park block %s as it would cause a deep reorg.\n",
                       pindex->GetBlockHash().ToString());
             pindex->nStatus = pindex->nStatus.withParked();
             setDirtyBlockIndex.insert(pindex);
         }
     }
 
     // Header is valid/has work and the merkle tree is good.
     // Relay now, but if it does not build on our best tip, let the
     // SendMessages loop relay it.
     if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
         GetMainSignals().NewPoWValidBlock(pindex, pblock);
     }
 
     // Write block to history file
     if (fNewBlock) {
         *fNewBlock = true;
     }
     try {
         FlatFilePos blockPos =
             SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
         if (blockPos.IsNull()) {
             state.Error(strprintf(
                 "%s: Failed to find position to write new block to disk",
                 __func__));
             return false;
         }
         ReceivedBlockTransactions(block, pindex, blockPos);
     } catch (const std::runtime_error &e) {
         return AbortNode(state, std::string("System error: ") + e.what());
     }
 
     FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
 
     CheckBlockIndex(consensusParams);
 
     return true;
 }
 
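 /**
  * Handle a new block arriving from the network or RPC: run the context-free
  * checks, store the block to disk via AcceptBlock (under cs_main), then try
  * to activate the best chain.
  */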
 bool ProcessNewBlock(const Config &config,
                      const std::shared_ptr<const CBlock> pblock,
                      bool fForceProcessing, bool *fNewBlock) {
     AssertLockNotHeld(cs_main);
 
     {
         if (fNewBlock) {
             *fNewBlock = false;
         }
 
         BlockValidationState state;
 
         // CheckBlock() does not support multi-threaded block validation
         // because CBlock::fChecked can cause a data race.
         // Therefore, the following critical section must include the
         // CheckBlock() call as well.
         LOCK(cs_main);
 
         // Ensure that CheckBlock() passes before calling AcceptBlock, as
         // belt-and-suspenders.
         bool ret =
             CheckBlock(*pblock, state, config.GetChainParams().GetConsensus(),
                        BlockValidationOptions(config));
         if (ret) {
             // Store to disk
             ret = ::ChainstateActive().AcceptBlock(
                 config, pblock, state, fForceProcessing, nullptr, fNewBlock);
         }
 
         if (!ret) {
             GetMainSignals().BlockChecked(*pblock, state);
             return error("%s: AcceptBlock FAILED (%s)", __func__,
                          FormatStateMessage(state));
         }
     }
 
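     // The lock scope above has ended, so cs_main is not held here;
     // NotifyHeaderTip() and ActivateBestChain() take it internally as
     // needed.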
     NotifyHeaderTip();
 
     // Only used to report errors, not invalidity - ignore it
     BlockValidationState state;
     if (!::ChainstateActive().ActivateBestChain(config, state, pblock)) {
         return error("%s: ActivateBestChain failed (%s)", __func__,
                      FormatStateMessage(state));
     }
 
     return true;
 }
 
 bool TestBlockValidity(BlockValidationState &state, const CChainParams &params,
                        const CBlock &block, CBlockIndex *pindexPrev,
                        BlockValidationOptions validationOptions) {
     AssertLockHeld(cs_main);
     assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
     CCoinsViewCache viewNew(pcoinsTip.get());
     BlockHash block_hash(block.GetHash());
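     // indexDummy is a throwaway index entry for this block, layered on the
     // current tip, so the checks below (including a check-only ConnectBlock
     // against the temporary view) never modify the real chain state.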
     CBlockIndex indexDummy(block);
     indexDummy.pprev = pindexPrev;
     indexDummy.nHeight = pindexPrev->nHeight + 1;
     indexDummy.phashBlock = &block_hash;
 
     // NOTE: CheckBlockHeader is called by CheckBlock
     if (!ContextualCheckBlockHeader(params, block, state, pindexPrev,
                                     GetAdjustedTime())) {
         return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
                      FormatStateMessage(state));
     }
 
     if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
         return error("%s: Consensus::CheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
 
     if (!ContextualCheckBlock(block, state, params.GetConsensus(),
                               pindexPrev)) {
         return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
 
     if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew,
                                            params, validationOptions, true)) {
         return false;
     }
 
     assert(state.IsValid());
     return true;
 }
 
 /**
  * BLOCK PRUNING CODE
  */
 
 /**
  * Calculate the amount of disk space the block & undo files currently use.
  */
 uint64_t CalculateCurrentUsage() {
     LOCK(cs_LastBlockFile);
 
     uint64_t retval = 0;
     for (const CBlockFileInfo &file : vinfoBlockFile) {
         retval += file.nSize + file.nUndoSize;
     }
 
     return retval;
 }
 
 /**
  * Prune a block file (modify associated database entries)
  */
 void PruneOneBlockFile(const int fileNumber) {
     LOCK(cs_LastBlockFile);
 
     for (const auto &entry : g_blockman.m_block_index) {
         CBlockIndex *pindex = entry.second;
         if (pindex->nFile == fileNumber) {
             pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
             pindex->nFile = 0;
             pindex->nDataPos = 0;
             pindex->nUndoPos = 0;
             setDirtyBlockIndex.insert(pindex);
 
             // Prune from m_blocks_unlinked -- any block we prune would have
             // to be downloaded again in order to consider its chain, at which
             // point it would be considered as a candidate for
             // m_blocks_unlinked or setBlockIndexCandidates.
             auto range =
                 g_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
                     range.first;
                 range.first++;
                 if (_it->second == pindex) {
                     g_blockman.m_blocks_unlinked.erase(_it);
                 }
             }
         }
     }
 
     vinfoBlockFile[fileNumber].SetNull();
     setDirtyFileInfo.insert(fileNumber);
 }
 
 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
     for (const int i : setFilesToPrune) {
         FlatFilePos pos(i, 0);
         fs::remove(BlockFileSeq().FileName(pos));
         fs::remove(UndoFileSeq().FileName(pos));
         LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i);
     }
 }
 
 /**
  * Calculate the block/rev files to delete based on height specified by user
  * with RPC command pruneblockchain
  */
 static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                    int nManualPruneHeight) {
     assert(fPruneMode && nManualPruneHeight > 0);
 
     LOCK2(cs_main, cs_LastBlockFile);
     if (::ChainActive().Tip() == nullptr) {
         return;
     }
 
     // last block to prune is the lesser of (user-specified height,
     // MIN_BLOCKS_TO_KEEP from the tip)
     unsigned int nLastBlockWeCanPrune =
         std::min((unsigned)nManualPruneHeight,
                  ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
     int count = 0;
     for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
         if (vinfoBlockFile[fileNumber].nSize == 0 ||
             vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
             continue;
         }
         PruneOneBlockFile(fileNumber);
         setFilesToPrune.insert(fileNumber);
         count++;
     }
     LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
               nLastBlockWeCanPrune, count);
 }
 
 /* This function is called from the RPC code for pruneblockchain */
 void PruneBlockFilesManual(int nManualPruneHeight) {
     BlockValidationState state;
     const CChainParams &chainparams = Params();
     if (!::ChainstateActive().FlushStateToDisk(
             chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
         LogPrintf("%s: failed to flush state (%s)\n", __func__,
                   FormatStateMessage(state));
     }
 }
 
 /**
  * Prune block and undo files (blk???.dat and undo???.dat) so that the disk
  * space used is less than a user-defined target. The user sets the target (in
  * MB) on the command line or in config file.  This will be run on startup and
  * whenever new space is allocated in a block or undo file, staying below the
  * target. Changing back to unpruned requires a reindex (which in this case
  * means the blockchain must be re-downloaded.)
  *
  * Pruning functions are called from FlushStateToDisk when the global
  * fCheckForPruning flag has been set. Block and undo files are deleted in
  * lock-step (when blk00003.dat is deleted, so is rev00003.dat). Pruning cannot
  * take place until the longest chain is at least a certain length (100000 on
  * mainnet, 1000 on testnet, 1000 on regtest). Pruning will never delete a block
  * within a defined distance (currently 288) from the active chain's tip. The
  * block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks
  * that were stored in the deleted files. A db flag records the fact that at
  * least some block files have been pruned.
  *
  * @param[out]   setFilesToPrune   The set of file indices that can safely be
  * unlinked.
  */
 static void FindFilesToPrune(std::set<int> &setFilesToPrune,
                              uint64_t nPruneAfterHeight) {
     LOCK2(cs_main, cs_LastBlockFile);
     if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
         return;
     }
     if (uint64_t(::ChainActive().Tip()->nHeight) <= nPruneAfterHeight) {
         return;
     }
 
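     // Example: with -prune=5120 the target is 5120 MiB of block plus undo
     // data; the loop below unlinks the oldest prunable blk/rev pairs until
     // usage (plus an allocation buffer) is back under that target.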
     unsigned int nLastBlockWeCanPrune =
         ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
     uint64_t nCurrentUsage = CalculateCurrentUsage();
     // We don't check to prune until after we've allocated new space for files,
     // so we should leave a buffer under our target to account for another
     // allocation before the next pruning.
     uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
     uint64_t nBytesToPrune;
     int count = 0;
 
     if (nCurrentUsage + nBuffer >= nPruneTarget) {
         // On a prune event, the chainstate DB is flushed.
         // To avoid excessive prune events negating the benefit of high dbcache
         // values, we should not prune too rapidly.
         // So when pruning in IBD, increase the buffer a bit to avoid a re-prune
         // too soon.
         if (::ChainstateActive().IsInitialBlockDownload()) {
             // Since this is only relevant during IBD, we use a fixed 10%
             nBuffer += nPruneTarget / 10;
         }
 
         for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
             nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
                             vinfoBlockFile[fileNumber].nUndoSize;
 
             if (vinfoBlockFile[fileNumber].nSize == 0) {
                 continue;
             }
 
             // are we below our target?
             if (nCurrentUsage + nBuffer < nPruneTarget) {
                 break;
             }
 
             // don't prune files that could have a block within
             // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
             if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                 continue;
             }
 
             PruneOneBlockFile(fileNumber);
             // Queue up the files for removal
             setFilesToPrune.insert(fileNumber);
             nCurrentUsage -= nBytesToPrune;
             count++;
         }
     }
 
     LogPrint(BCLog::PRUNE,
              "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
              "max_prune_height=%d removed %d blk/rev pairs\n",
              nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
              ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
              nLastBlockWeCanPrune, count);
 }
 
-static FlatFileSeq BlockFileSeq() {
-    return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
-}
-
-static FlatFileSeq UndoFileSeq() {
-    return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
-}
-
-FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
-    return BlockFileSeq().Open(pos, fReadOnly);
-}
-
-/** Open an undo file (rev?????.dat) */
-static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
-    return UndoFileSeq().Open(pos, fReadOnly);
-}
-
-fs::path GetBlockPosFilename(const FlatFilePos &pos) {
-    return BlockFileSeq().FileName(pos);
-}
-
 CBlockIndex *BlockManager::InsertBlockIndex(const BlockHash &hash) {
     AssertLockHeld(cs_main);
 
     if (hash.IsNull()) {
         return nullptr;
     }
 
     // Return existing
     BlockMap::iterator mi = m_block_index.find(hash);
     if (mi != m_block_index.end()) {
         return (*mi).second;
     }
 
     // Create new
     CBlockIndex *pindexNew = new CBlockIndex();
     mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
 
     return pindexNew;
 }
 
 bool BlockManager::LoadBlockIndex(
     const Consensus::Params &params, CBlockTreeDB &blocktree,
     std::set<CBlockIndex *, CBlockIndexWorkComparator>
         &block_index_candidates) {
     AssertLockHeld(cs_main);
     if (!blocktree.LoadBlockIndexGuts(
             params, [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(
                         cs_main) { return this->InsertBlockIndex(hash); })) {
         return false;
     }
 
     // Calculate nChainWork
     std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
     vSortedByHeight.reserve(m_block_index.size());
     for (const std::pair<const BlockHash, CBlockIndex *> &item :
          m_block_index) {
         CBlockIndex *pindex = item.second;
         vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
     }
 
     sort(vSortedByHeight.begin(), vSortedByHeight.end());
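     // Iterate in increasing height order so that a block's parent has
     // already been processed when nChainWork and nTimeMax are derived
     // from pprev.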
     for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
         if (ShutdownRequested()) {
             return false;
         }
         CBlockIndex *pindex = item.second;
         pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
                              GetBlockProof(*pindex);
         pindex->nTimeMax =
             (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
                            : pindex->nTime);
         // We can link the chain of blocks for which we've received transactions
         // at some point. Pruned nodes may have deleted the block.
         if (pindex->nTx > 0) {
             if (!pindex->UpdateChainStats() && pindex->pprev) {
                 m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
             }
         }
 
         if (!pindex->nStatus.hasFailed() && pindex->pprev &&
             pindex->pprev->nStatus.hasFailed()) {
             pindex->nStatus = pindex->nStatus.withFailedParent();
             setDirtyBlockIndex.insert(pindex);
         }
         if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
             (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
             block_index_candidates.insert(pindex);
         }
 
         if (pindex->nStatus.isInvalid() &&
             (!pindexBestInvalid ||
              pindex->nChainWork > pindexBestInvalid->nChainWork)) {
             pindexBestInvalid = pindex;
         }
 
         if (pindex->nStatus.isOnParkedChain() &&
             (!pindexBestParked ||
              pindex->nChainWork > pindexBestParked->nChainWork)) {
             pindexBestParked = pindex;
         }
 
         if (pindex->pprev) {
             pindex->BuildSkip();
         }
 
         if (pindex->IsValid(BlockValidity::TREE) &&
             (pindexBestHeader == nullptr ||
              CBlockIndexWorkComparator()(pindexBestHeader, pindex))) {
             pindexBestHeader = pindex;
         }
     }
 
     return true;
 }
 
 void BlockManager::Unload() {
     m_failed_blocks.clear();
     m_blocks_unlinked.clear();
 
     for (const BlockMap::value_type &entry : m_block_index) {
         delete entry.second;
     }
 
     m_block_index.clear();
 }
 
 static bool LoadBlockIndexDB(const Consensus::Params &params)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (!g_blockman.LoadBlockIndex(
             params, *pblocktree,
             ::ChainstateActive().setBlockIndexCandidates)) {
         return false;
     }
 
     // Load block file info
     pblocktree->ReadLastBlockFile(nLastBlockFile);
     vinfoBlockFile.resize(nLastBlockFile + 1);
     LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
     for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
         pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
     }
 
     LogPrintf("%s: last block file info: %s\n", __func__,
               vinfoBlockFile[nLastBlockFile].ToString());
 
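     // Scan for any additional block file info entries beyond the recorded
     // last block file.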
     for (int nFile = nLastBlockFile + 1; true; nFile++) {
         CBlockFileInfo info;
         if (pblocktree->ReadBlockFileInfo(nFile, info)) {
             vinfoBlockFile.push_back(info);
         } else {
             break;
         }
     }
 
     // Check presence of blk files
     LogPrintf("Checking all blk files are present...\n");
     std::set<int> setBlkDataFiles;
     for (const std::pair<const BlockHash, CBlockIndex *> &item :
          g_blockman.m_block_index) {
         CBlockIndex *pindex = item.second;
         if (pindex->nStatus.hasData()) {
             setBlkDataFiles.insert(pindex->nFile);
         }
     }
 
     for (const int i : setBlkDataFiles) {
         FlatFilePos pos(i, 0);
         if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)
                 .IsNull()) {
             return false;
         }
     }
 
     // Check whether we have ever pruned block & undo files
     pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
     if (fHavePruned) {
         LogPrintf(
             "LoadBlockIndexDB(): Block files have previously been pruned\n");
     }
 
     // Check whether we need to continue reindexing
     if (pblocktree->IsReindexing()) {
         fReindex = true;
     }
 
     return true;
 }
 
 bool LoadChainTip(const Config &config) {
     AssertLockHeld(cs_main);
     // Never called when the coins view is empty
     assert(!pcoinsTip->GetBestBlock().IsNull());
 
     if (::ChainActive().Tip() &&
         ::ChainActive().Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) {
         return true;
     }
 
     // Load pointer to end of best chain
     CBlockIndex *pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
     if (!pindex) {
         return false;
     }
     ::ChainActive().SetTip(pindex);
 
     ::ChainstateActive().PruneBlockIndexCandidates();
 
     LogPrintf(
         "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
         ::ChainActive().Tip()->GetBlockHash().ToString(),
         ::ChainActive().Height(),
         FormatISO8601DateTime(::ChainActive().Tip()->GetBlockTime()),
         GuessVerificationProgress(config.GetChainParams().TxData(),
                                   ::ChainActive().Tip()));
     return true;
 }
 
 CVerifyDB::CVerifyDB() {
     uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
 }
 
 CVerifyDB::~CVerifyDB() {
     uiInterface.ShowProgress("", 100, false);
 }
 
 bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
                          int nCheckLevel, int nCheckDepth) {
     LOCK(cs_main);
 
     const CChainParams &params = config.GetChainParams();
     const Consensus::Params &consensusParams = params.GetConsensus();
 
     if (::ChainActive().Tip() == nullptr ||
         ::ChainActive().Tip()->pprev == nullptr) {
         return true;
     }
 
     // Verify blocks in the best chain
     if (nCheckDepth <= 0 || nCheckDepth > ::ChainActive().Height()) {
         nCheckDepth = ::ChainActive().Height();
     }
 
     nCheckLevel = std::max(0, std::min(4, nCheckLevel));
     LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
               nCheckLevel);
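     // Check levels are cumulative: 0 reads blocks from disk, 1 verifies
     // block validity, 2 verifies undo data, 3 disconnects tip blocks in
     // memory to look for inconsistencies, and 4 reconnects them.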
 
     CCoinsViewCache coins(coinsview);
     CBlockIndex *pindex;
     CBlockIndex *pindexFailure = nullptr;
     int nGoodTransactions = 0;
     BlockValidationState state;
     int reportDone = 0;
     LogPrintfToBeContinued("[0%%]...");
     for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev;
          pindex = pindex->pprev) {
         boost::this_thread::interruption_point();
         const int percentageDone =
             std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() -
                                                      pindex->nHeight)) /
                                            (double)nCheckDepth *
                                            (nCheckLevel >= 4 ? 50 : 100))));
         if (reportDone < percentageDone / 10) {
             // report every 10% step
             LogPrintfToBeContinued("[%d%%]...", percentageDone);
             reportDone = percentageDone / 10;
         }
 
         uiInterface.ShowProgress(_("Verifying blocks...").translated,
                                  percentageDone, false);
         if (pindex->nHeight <= ::ChainActive().Height() - nCheckDepth) {
             break;
         }
 
         if (fPruneMode && !pindex->nStatus.hasData()) {
             // If pruning, only go back as far as we have data.
             LogPrintf("VerifyDB(): block verification stopping at height %d "
                       "(pruning, no data)\n",
                       pindex->nHeight);
             break;
         }
 
         CBlock block;
 
         // check level 0: read from disk
         if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
             return error(
                 "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
                 pindex->nHeight, pindex->GetBlockHash().ToString());
         }
 
         // check level 1: verify block validity
         if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
                                             BlockValidationOptions(config))) {
             return error("%s: *** found bad block at %d, hash=%s (%s)\n",
                          __func__, pindex->nHeight,
                          pindex->GetBlockHash().ToString(),
                          FormatStateMessage(state));
         }
 
         // check level 2: verify undo validity
         if (nCheckLevel >= 2 && pindex) {
             CBlockUndo undo;
             if (!pindex->GetUndoPos().IsNull()) {
                 if (!UndoReadFromDisk(undo, pindex)) {
                     return error(
                         "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
                         pindex->nHeight, pindex->GetBlockHash().ToString());
                 }
             }
         }
 
         // check level 3: check for inconsistencies during memory-only
         // disconnect of tip blocks
         if (nCheckLevel >= 3 &&
             (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <=
                 nCoinCacheUsage) {
             assert(coins.GetBestBlock() == pindex->GetBlockHash());
             DisconnectResult res =
                 ::ChainstateActive().DisconnectBlock(block, pindex, coins);
             if (res == DisconnectResult::FAILED) {
                 return error("VerifyDB(): *** irrecoverable inconsistency in "
                              "block data at %d, hash=%s",
                              pindex->nHeight,
                              pindex->GetBlockHash().ToString());
             }
 
             if (res == DisconnectResult::UNCLEAN) {
                 nGoodTransactions = 0;
                 pindexFailure = pindex;
             } else {
                 nGoodTransactions += block.vtx.size();
             }
         }
 
         if (ShutdownRequested()) {
             return true;
         }
     }
 
     if (pindexFailure) {
         return error("VerifyDB(): *** coin database inconsistencies found "
                      "(last %i blocks, %i good transactions before that)\n",
                      ::ChainActive().Height() - pindexFailure->nHeight + 1,
                      nGoodTransactions);
     }
 
     // store block count as we move pindex at check level >= 4
     int block_count = ::ChainActive().Height() - pindex->nHeight;
 
     // check level 4: try reconnecting blocks
     if (nCheckLevel >= 4) {
         while (pindex != ::ChainActive().Tip()) {
             boost::this_thread::interruption_point();
             const int percentageDone = std::max(
                 1, std::min(99, 100 - int(double(::ChainActive().Height() -
                                                  pindex->nHeight) /
                                           double(nCheckDepth) * 50)));
             if (reportDone < percentageDone / 10) {
                 // report every 10% step
                 LogPrintfToBeContinued("[%d%%]...", percentageDone);
                 reportDone = percentageDone / 10;
             }
             uiInterface.ShowProgress(_("Verifying blocks...").translated,
                                      percentageDone, false);
             pindex = ::ChainActive().Next(pindex);
             CBlock block;
             if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
                 return error(
                     "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
             }
             if (!::ChainstateActive().ConnectBlock(
                     block, state, pindex, coins, params,
                     BlockValidationOptions(config))) {
                 return error("VerifyDB(): *** found unconnectable block at %d, "
                              "hash=%s (%s)",
                              pindex->nHeight, pindex->GetBlockHash().ToString(),
                              FormatStateMessage(state));
             }
         }
     }
 
     LogPrintf("[DONE].\n");
     LogPrintf("No coin database inconsistencies in last %i blocks (%i "
               "transactions)\n",
               block_count, nGoodTransactions);
 
     return true;
 }
 
 /**
  * Apply the effects of a block on the utxo cache, ignoring that it may already
  * have been applied.
  */
 bool CChainState::RollforwardBlock(const CBlockIndex *pindex,
                                    CCoinsViewCache &view,
                                    const Consensus::Params &params) {
     // TODO: merge with ConnectBlock
     CBlock block;
     if (!ReadBlockFromDisk(block, pindex, params)) {
         return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
                      pindex->nHeight, pindex->GetBlockHash().ToString());
     }
 
     for (const CTransactionRef &tx : block.vtx) {
         // Pass check = true as every addition may be an overwrite.
         AddCoins(view, *tx, pindex->nHeight, true);
     }
 
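     // Second pass: mark every input spent (coinbase inputs are skipped
     // since they do not reference existing coins).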
     for (const CTransactionRef &tx : block.vtx) {
         if (tx->IsCoinBase()) {
             continue;
         }
 
         for (const CTxIn &txin : tx->vin) {
             view.SpendCoin(txin.prevout);
         }
     }
 
     return true;
 }
 
 bool CChainState::ReplayBlocks(const Consensus::Params &params,
                                CCoinsView *view) {
     LOCK(cs_main);
 
     CCoinsViewCache cache(view);
 
     std::vector<BlockHash> hashHeads = view->GetHeadBlocks();
     if (hashHeads.empty()) {
         // We're already in a consistent state.
         return true;
     }
 
     if (hashHeads.size() != 2) {
         return error("ReplayBlocks(): unknown inconsistent state");
     }
 
     uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
     LogPrintf("Replaying blocks\n");
 
     // Old tip during the interrupted flush.
     const CBlockIndex *pindexOld = nullptr;
     // New tip during the interrupted flush.
     const CBlockIndex *pindexNew;
     // Latest block common to both the old and the new tip.
     const CBlockIndex *pindexFork = nullptr;
 
     if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
         return error(
             "ReplayBlocks(): reorganization to unknown block requested");
     }
 
     pindexNew = m_blockman.m_block_index[hashHeads[0]];
 
     if (!hashHeads[1].IsNull()) {
         // The old tip is allowed to be 0, indicating it's the first flush.
         if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
             return error(
                 "ReplayBlocks(): reorganization from unknown block requested");
         }
 
         pindexOld = m_blockman.m_block_index[hashHeads[1]];
         pindexFork = LastCommonAncestor(pindexOld, pindexNew);
         assert(pindexFork != nullptr);
     }
 
     // Rollback along the old branch.
     while (pindexOld != pindexFork) {
         if (pindexOld->nHeight > 0) {
             // Never disconnect the genesis block.
             CBlock block;
             if (!ReadBlockFromDisk(block, pindexOld, params)) {
                 return error("RollbackBlock(): ReadBlockFromDisk() failed at "
                              "%d, hash=%s",
                              pindexOld->nHeight,
                              pindexOld->GetBlockHash().ToString());
             }
 
             LogPrintf("Rolling back %s (%i)\n",
                       pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
             DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
             if (res == DisconnectResult::FAILED) {
                 return error(
                     "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
                     pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
             }
 
             // If DisconnectResult::UNCLEAN is returned, it means a non-existing
             // UTXO was deleted, or an existing UTXO was overwritten. It
             // corresponds to cases where the block-to-be-disconnected never
             // had all its operations applied to the UTXO set. However, as both
             // writing a UTXO and deleting a UTXO are idempotent operations, the
             // result is still a version of the UTXO set with the effects of
             // that block undone.
         }
         pindexOld = pindexOld->pprev;
     }
 
     // Roll forward from the forking point to the new tip.
     int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
     for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
          ++nHeight) {
         const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
         LogPrintf("Rolling forward %s (%i)\n",
                   pindex->GetBlockHash().ToString(), nHeight);
         uiInterface.ShowProgress(_("Replaying blocks...").translated,
                                  (int)((nHeight - nForkHeight) * 100.0 /
                                        (pindexNew->nHeight - nForkHeight)),
                                  false);
         if (!RollforwardBlock(pindex, cache, params)) {
             return false;
         }
     }
 
     cache.SetBestBlock(pindexNew->GetBlockHash());
     cache.Flush();
     uiInterface.ShowProgress("", 100, false);
     return true;
 }
 
 bool ReplayBlocks(const Consensus::Params &params, CCoinsView *view) {
     return ::ChainstateActive().ReplayBlocks(params, view);
 }
 
 // May NOT be used after any connections are up as much of the peer-processing
 // logic assumes a consistent block index state
 void CChainState::UnloadBlockIndex() {
     nBlockSequenceId = 1;
     setBlockIndexCandidates.clear();
 
     // Do not point to CBlockIndex that will be free'd
     m_finalizedBlockIndex = nullptr;
 }
 
 // May NOT be used after any connections are up as much
 // of the peer-processing logic assumes a consistent
 // block index state
 void UnloadBlockIndex() {
     LOCK(cs_main);
     ::ChainActive().SetTip(nullptr);
     g_blockman.Unload();
     pindexBestInvalid = nullptr;
     pindexBestParked = nullptr;
     pindexBestHeader = nullptr;
     pindexBestForkTip = nullptr;
     pindexBestForkBase = nullptr;
     ResetASERTAnchorBlockCache();
     g_mempool.clear();
     vinfoBlockFile.clear();
     nLastBlockFile = 0;
     setDirtyBlockIndex.clear();
     setDirtyFileInfo.clear();
     fHavePruned = false;
 
     ::ChainstateActive().UnloadBlockIndex();
 }
 
 bool LoadBlockIndex(const Consensus::Params &params) {
     // Load block index from databases
     bool needs_init = fReindex;
     if (!fReindex) {
         bool ret = LoadBlockIndexDB(params);
         if (!ret) {
             return false;
         }
 
         needs_init = g_blockman.m_block_index.empty();
     }
 
     if (needs_init) {
         // Everything here is for *new* reindex/DBs. Thus, though
         // LoadBlockIndexDB may have set fReindex if we shut down
         // mid-reindex previously, we don't check fReindex and
         // instead only check it prior to LoadBlockIndexDB to set
         // needs_init.
 
         LogPrintf("Initializing databases...\n");
     }
     return true;
 }
 
 bool CChainState::LoadGenesisBlock(const CChainParams &chainparams) {
     LOCK(cs_main);
 
     // Check whether we're already initialized by checking for genesis in
     // m_blockman.m_block_index. Note that we can't use m_chain here, since it
     // is set based on the coins db, not the block index db, which is the only
     // thing loaded at this point.
     if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash())) {
         return true;
     }
 
     try {
         const CBlock &block = chainparams.GenesisBlock();
         FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
         if (blockPos.IsNull()) {
             return error("%s: writing genesis block to disk failed", __func__);
         }
         CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
         ReceivedBlockTransactions(block, pindex, blockPos);
     } catch (const std::runtime_error &e) {
         return error("%s: failed to write genesis block: %s", __func__,
                      e.what());
     }
 
     return true;
 }
 
 bool LoadGenesisBlock(const CChainParams &chainparams) {
     return ::ChainstateActive().LoadGenesisBlock(chainparams);
 }
 
 bool LoadExternalBlockFile(const Config &config, FILE *fileIn,
                            FlatFilePos *dbp) {
     // Map of disk positions for blocks with unknown parent (only used for
     // reindex)
     static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
     int64_t nStart = GetTimeMillis();
 
     const CChainParams &chainparams = config.GetChainParams();
 
     int nLoaded = 0;
     try {
         // This takes over fileIn and calls fclose() on it in the CBufferedFile
         // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
         // so any transaction can fit in the buffer.
         CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
                              CLIENT_VERSION);
         uint64_t nRewind = blkdat.GetPos();
         while (!blkdat.eof()) {
             boost::this_thread::interruption_point();
 
             blkdat.SetPos(nRewind);
             // Start one byte further next time, in case of failure.
             nRewind++;
             // Remove former limit.
             blkdat.SetLimit();
             unsigned int nSize = 0;
             try {
                 // Locate a header.
                 uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
                 blkdat.FindByte(chainparams.DiskMagic()[0]);
                 nRewind = blkdat.GetPos() + 1;
                 blkdat >> buf;
                 if (memcmp(buf, chainparams.DiskMagic().data(),
                            CMessageHeader::MESSAGE_START_SIZE)) {
                     continue;
                 }
 
                 // Read size.
                 blkdat >> nSize;
                 if (nSize < 80) {
                     continue;
                 }
             } catch (const std::exception &) {
                 // No valid block header found; don't complain.
                 break;
             }
 
             try {
                 // read block
                 uint64_t nBlockPos = blkdat.GetPos();
                 if (dbp) {
                     dbp->nPos = nBlockPos;
                 }
                 blkdat.SetLimit(nBlockPos + nSize);
                 blkdat.SetPos(nBlockPos);
                 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                 CBlock &block = *pblock;
                 blkdat >> block;
                 nRewind = blkdat.GetPos();
 
                 const BlockHash hash = block.GetHash();
                 {
                     LOCK(cs_main);
                     // detect out of order blocks, and store them for later
                     if (hash != chainparams.GetConsensus().hashGenesisBlock &&
                         !LookupBlockIndex(block.hashPrevBlock)) {
                         LogPrint(
                             BCLog::REINDEX,
                             "%s: Out of order block %s, parent %s not known\n",
                             __func__, hash.ToString(),
                             block.hashPrevBlock.ToString());
                         if (dbp) {
                             mapBlocksUnknownParent.insert(
                                 std::make_pair(block.hashPrevBlock, *dbp));
                         }
                         continue;
                     }
 
                     // process in case the block isn't known yet
                     CBlockIndex *pindex = LookupBlockIndex(hash);
                     if (!pindex || !pindex->nStatus.hasData()) {
                         BlockValidationState state;
                         if (::ChainstateActive().AcceptBlock(
                                 config, pblock, state, true, dbp, nullptr)) {
                             nLoaded++;
                         }
                         if (state.IsError()) {
                             break;
                         }
                     } else if (hash != chainparams.GetConsensus()
                                            .hashGenesisBlock &&
                                pindex->nHeight % 1000 == 0) {
                         LogPrint(
                             BCLog::REINDEX,
                             "Block Import: already had block %s at height %d\n",
                             hash.ToString(), pindex->nHeight);
                     }
                 }
 
                 // Activate the genesis block so normal node progress can
                 // continue
                 if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                     BlockValidationState state;
                     if (!ActivateBestChain(config, state)) {
                         break;
                     }
                 }
 
                 NotifyHeaderTip();
 
                 // Recursively process earlier encountered successors of this
                 // block
                 std::deque<uint256> queue;
                 queue.push_back(hash);
                 while (!queue.empty()) {
                     uint256 head = queue.front();
                     queue.pop_front();
                     std::pair<std::multimap<uint256, FlatFilePos>::iterator,
                               std::multimap<uint256, FlatFilePos>::iterator>
                         range = mapBlocksUnknownParent.equal_range(head);
                     while (range.first != range.second) {
                         std::multimap<uint256, FlatFilePos>::iterator it =
                             range.first;
                         std::shared_ptr<CBlock> pblockrecursive =
                             std::make_shared<CBlock>();
                         if (ReadBlockFromDisk(*pblockrecursive, it->second,
                                               chainparams.GetConsensus())) {
                             LogPrint(
                                 BCLog::REINDEX,
                                 "%s: Processing out of order child %s of %s\n",
                                 __func__, pblockrecursive->GetHash().ToString(),
                                 head.ToString());
                             LOCK(cs_main);
                             BlockValidationState dummy;
                             if (::ChainstateActive().AcceptBlock(
                                     config, pblockrecursive, dummy, true,
                                     &it->second, nullptr)) {
                                 nLoaded++;
                                 queue.push_back(pblockrecursive->GetHash());
                             }
                         }
                         range.first++;
                         mapBlocksUnknownParent.erase(it);
                         NotifyHeaderTip();
                     }
                 }
             } catch (const std::exception &e) {
                 LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
                           e.what());
             }
         }
     } catch (const std::runtime_error &e) {
         AbortNode(std::string("System error: ") + e.what());
     }
 
     if (nLoaded > 0) {
         LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
                   GetTimeMillis() - nStart);
     }
 
     return nLoaded > 0;
 }
 
 void CChainState::CheckBlockIndex(const Consensus::Params &consensusParams) {
     if (!fCheckBlockIndex) {
         return;
     }
 
     LOCK(cs_main);
 
     // During a reindex, we read the genesis block and call CheckBlockIndex
     // before ActivateBestChain, so we have the genesis block in
     // m_blockman.m_block_index but no active chain. (A few of the tests when
     // iterating the block tree require that m_chain has been initialized.)
     if (m_chain.Height() < 0) {
         assert(m_blockman.m_block_index.size() <= 1);
         return;
     }
 
     // Build forward-pointing map of the entire block tree.
     std::multimap<CBlockIndex *, CBlockIndex *> forward;
     for (const auto &entry : m_blockman.m_block_index) {
         forward.emplace(entry.second->pprev, entry.second);
     }
 
     assert(forward.size() == m_blockman.m_block_index.size());
 
     std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
               std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
         rangeGenesis = forward.equal_range(nullptr);
     CBlockIndex *pindex = rangeGenesis.first->second;
     rangeGenesis.first++;
     // There is only one index entry with parent nullptr.
     assert(rangeGenesis.first == rangeGenesis.second);
 
     // Iterate over the entire block tree, using depth-first search.
     // Along the way, remember whether there are blocks on the path from genesis
     // block being explored which are the first to have certain properties.
     size_t nNodes = 0;
     int nHeight = 0;
     // Oldest ancestor of pindex which is invalid.
     CBlockIndex *pindexFirstInvalid = nullptr;
     // Oldest ancestor of pindex which is parked.
     CBlockIndex *pindexFirstParked = nullptr;
     // Oldest ancestor of pindex which does not have data available.
     CBlockIndex *pindexFirstMissing = nullptr;
     // Oldest ancestor of pindex for which nTx == 0.
     CBlockIndex *pindexFirstNeverProcessed = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotTreeValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotChainValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotScriptsValid = nullptr;
     while (pindex != nullptr) {
         nNodes++;
         if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
             pindexFirstInvalid = pindex;
         }
         if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
             pindexFirstParked = pindex;
         }
         if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) {
             pindexFirstMissing = pindex;
         }
         if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
             pindexFirstNeverProcessed = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
             pindex->nStatus.getValidity() < BlockValidity::TREE) {
             pindexFirstNotTreeValid = pindex;
         }
         if (pindex->pprev != nullptr &&
             pindexFirstNotTransactionsValid == nullptr &&
             pindex->nStatus.getValidity() < BlockValidity::TRANSACTIONS) {
             pindexFirstNotTransactionsValid = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
             pindex->nStatus.getValidity() < BlockValidity::CHAIN) {
             pindexFirstNotChainValid = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
             pindex->nStatus.getValidity() < BlockValidity::SCRIPTS) {
             pindexFirstNotScriptsValid = pindex;
         }
 
         // Begin: actual consistency checks.
         if (pindex->pprev == nullptr) {
             // Genesis block checks.
             // Genesis block's hash must match.
             assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
             // The current active chain's genesis block must be this block.
             assert(pindex == m_chain.Genesis());
         }
         if (!pindex->HaveTxsDownloaded()) {
             // nSequenceId can't be set positive for blocks that aren't linked
             // (negative is used for preciousblock)
             assert(pindex->nSequenceId <= 0);
         }
         // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
         // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
         // (or VALID_TRANSACTIONS) if no pruning has occurred.
         if (!fHavePruned) {
             // If we've never pruned, then HAVE_DATA should be equivalent to nTx
             // > 0
             assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
             assert(pindexFirstMissing == pindexFirstNeverProcessed);
         } else if (pindex->nStatus.hasData()) {
             // If we have pruned, then we can only say that HAVE_DATA implies
             // nTx > 0
             assert(pindex->nTx > 0);
         }
         if (pindex->nStatus.hasUndo()) {
             assert(pindex->nStatus.hasData());
         }
         // This is pruning-independent.
         assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) ==
                (pindex->nTx > 0));
         // All parents having had data (at some point) is equivalent to all
         // parents being VALID_TRANSACTIONS, which is equivalent to
         // HaveTxsDownloaded().
         assert((pindexFirstNeverProcessed == nullptr) ==
                (pindex->HaveTxsDownloaded()));
         assert((pindexFirstNotTransactionsValid == nullptr) ==
                (pindex->HaveTxsDownloaded()));
         // nHeight must be consistent.
         assert(pindex->nHeight == nHeight);
         // For every block except the genesis block, the chainwork must be
         // larger than the parent's.
         assert(pindex->pprev == nullptr ||
                pindex->nChainWork >= pindex->pprev->nChainWork);
         // The pskip pointer must point back for all but the first 2 blocks.
         assert(nHeight < 2 ||
                (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
         // All m_blockman.m_block_index entries must at least be TREE valid
         assert(pindexFirstNotTreeValid == nullptr);
         if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
             // TREE valid implies all parents are TREE valid
             assert(pindexFirstNotTreeValid == nullptr);
         }
         if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
             // CHAIN valid implies all parents are CHAIN valid
             assert(pindexFirstNotChainValid == nullptr);
         }
         if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
             // SCRIPTS valid implies all parents are SCRIPTS valid
             assert(pindexFirstNotScriptsValid == nullptr);
         }
         if (pindexFirstInvalid == nullptr) {
             // Checks for not-invalid blocks.
             // The failed mask cannot be set for blocks without invalid parents.
             assert(!pindex->nStatus.isInvalid());
         }
         if (pindexFirstParked == nullptr) {
             // Checks for not-parked blocks.
             // The parked mask cannot be set for blocks without parked parents.
             // (i.e., hasParkedParent only if an ancestor is properly parked).
             assert(!pindex->nStatus.isOnParkedChain());
         }
         if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
             pindexFirstNeverProcessed == nullptr) {
             if (pindexFirstInvalid == nullptr) {
                 // If this block sorts at least as good as the current tip and
                 // is valid and we have all data for its parents, it must be in
                 // setBlockIndexCandidates or be parked.
                 if (pindexFirstMissing == nullptr) {
                     assert(pindex->nStatus.isOnParkedChain() ||
                            setBlockIndexCandidates.count(pindex));
                 }
                 // m_chain.Tip() must also be there even if some data has
                 // been pruned.
                 if (pindex == m_chain.Tip()) {
                     assert(setBlockIndexCandidates.count(pindex));
                 }
                 // If some parent is missing, then it could be that this block
                 // was in setBlockIndexCandidates but had to be removed because
                 // of the missing data. In this case it must be in
                 // m_blocks_unlinked -- see test below.
             }
         } else {
             // If this block sorts worse than the current tip or some ancestor's
             // block has never been seen, it cannot be in
             // setBlockIndexCandidates.
             assert(setBlockIndexCandidates.count(pindex) == 0);
         }
         // Check whether this block is in m_blocks_unlinked.
         std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                   std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
             rangeUnlinked =
                 m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
         bool foundInUnlinked = false;
         while (rangeUnlinked.first != rangeUnlinked.second) {
             assert(rangeUnlinked.first->first == pindex->pprev);
             if (rangeUnlinked.first->second == pindex) {
                 foundInUnlinked = true;
                 break;
             }
             rangeUnlinked.first++;
         }
         if (pindex->pprev && pindex->nStatus.hasData() &&
             pindexFirstNeverProcessed != nullptr &&
             pindexFirstInvalid == nullptr) {
             // If this block has its data available, some parent's data was
             // never received, and it has no invalid parents, then it must be
             // in m_blocks_unlinked.
             assert(foundInUnlinked);
         }
         if (!pindex->nStatus.hasData()) {
             // Can't be in m_blocks_unlinked if we don't HAVE_DATA
             assert(!foundInUnlinked);
         }
         if (pindexFirstMissing == nullptr) {
             // We aren't missing data for any parent -- cannot be in
             // m_blocks_unlinked.
             assert(!foundInUnlinked);
         }
         if (pindex->pprev && pindex->nStatus.hasData() &&
             pindexFirstNeverProcessed == nullptr &&
             pindexFirstMissing != nullptr) {
             // We HAVE_DATA for this block, have received data for all parents
             // at some point, but we're currently missing data for some parent.
             // We must have pruned.
             assert(fHavePruned);
             // This block may have entered m_blocks_unlinked if:
             //  - it has a descendant that at some point had more work than the
             //    tip, and
             //  - we tried switching to that descendant but were missing
             //    data for some intermediate block between m_chain and the
             //    tip.
             // So if this block is itself better than m_chain.Tip() and it
             // wasn't in setBlockIndexCandidates, then it must be in
             // m_blocks_unlinked.
             if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
                 setBlockIndexCandidates.count(pindex) == 0) {
                 if (pindexFirstInvalid == nullptr) {
                     assert(foundInUnlinked);
                 }
             }
         }
         // Perhaps too slow
         // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
         // End: actual consistency checks.
 
         // Try descending into the first subnode.
         std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                   std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
             range = forward.equal_range(pindex);
         if (range.first != range.second) {
             // A subnode was found.
             pindex = range.first->second;
             nHeight++;
             continue;
         }
         // This is a leaf node. Move upwards until we reach a node of which we
         // have not yet visited the last child.
         while (pindex) {
             // We are going to either move to a parent or a sibling of pindex.
             // If pindex was the first with a certain property, unset the
             // corresponding variable.
             if (pindex == pindexFirstInvalid) {
                 pindexFirstInvalid = nullptr;
             }
             if (pindex == pindexFirstParked) {
                 pindexFirstParked = nullptr;
             }
             if (pindex == pindexFirstMissing) {
                 pindexFirstMissing = nullptr;
             }
             if (pindex == pindexFirstNeverProcessed) {
                 pindexFirstNeverProcessed = nullptr;
             }
             if (pindex == pindexFirstNotTreeValid) {
                 pindexFirstNotTreeValid = nullptr;
             }
             if (pindex == pindexFirstNotTransactionsValid) {
                 pindexFirstNotTransactionsValid = nullptr;
             }
             if (pindex == pindexFirstNotChainValid) {
                 pindexFirstNotChainValid = nullptr;
             }
             if (pindex == pindexFirstNotScriptsValid) {
                 pindexFirstNotScriptsValid = nullptr;
             }
             // Find our parent.
             CBlockIndex *pindexPar = pindex->pprev;
             // Find which child we just visited.
             std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                       std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                 rangePar = forward.equal_range(pindexPar);
             while (rangePar.first->second != pindex) {
                 // Our parent must have at least the node we're coming from as
                 // child.
                 assert(rangePar.first != rangePar.second);
                 rangePar.first++;
             }
             // Proceed to the next one.
             rangePar.first++;
             if (rangePar.first != rangePar.second) {
                 // Move to the sibling.
                 pindex = rangePar.first->second;
                 break;
             } else {
                 // Move up further.
                 pindex = pindexPar;
                 nHeight--;
                 continue;
             }
         }
     }
 
     // Check that we actually traversed the entire map.
     assert(nNodes == forward.size());
 }
 
 std::string CBlockFileInfo::ToString() const {
     return strprintf(
         "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
         nBlocks, nSize, nHeightFirst, nHeightLast,
         FormatISO8601DateTime(nTimeFirst), FormatISO8601DateTime(nTimeLast));
 }
 
 CBlockFileInfo *GetBlockFileInfo(size_t n) {
     LOCK(cs_LastBlockFile);
 
     return &vinfoBlockFile.at(n);
 }
 
 static ThresholdState VersionBitsStateImpl(const Consensus::Params &params,
                                            Consensus::DeploymentPos pos,
                                            const CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     return VersionBitsState(pindex, params, pos, versionbitscache);
 }
 
 ThresholdState VersionBitsTipState(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos) {
     LOCK(cs_main);
     return VersionBitsStateImpl(params, pos, ::ChainActive().Tip());
 }
 
 ThresholdState VersionBitsBlockState(const Consensus::Params &params,
                                      Consensus::DeploymentPos pos,
                                      const CBlockIndex *pindex) {
     LOCK(cs_main);
     return VersionBitsStateImpl(params, pos, pindex);
 }
 
 BIP9Stats VersionBitsTipStatistics(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos) {
     LOCK(cs_main);
     return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
 }
 
 int VersionBitsTipStateSinceHeight(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos) {
     LOCK(cs_main);
     return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos,
                                        versionbitscache);
 }
 
 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
 
 bool LoadMempool(const Config &config, CTxMemPool &pool) {
     int64_t nExpiryTimeout =
         gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
     FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
     CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
     if (file.IsNull()) {
         LogPrintf(
             "Failed to open mempool file from disk. Continuing anyway.\n");
         return false;
     }
 
     int64_t count = 0;
     int64_t expired = 0;
     int64_t failed = 0;
     int64_t already_there = 0;
     int64_t nNow = GetTime();
 
     try {
         uint64_t version;
         file >> version;
         if (version != MEMPOOL_DUMP_VERSION) {
             return false;
         }
 
         uint64_t num;
         file >> num;
         while (num--) {
             CTransactionRef tx;
             int64_t nTime;
             int64_t nFeeDelta;
             file >> tx;
             file >> nTime;
             file >> nFeeDelta;
 
             Amount amountdelta = nFeeDelta * SATOSHI;
             if (amountdelta != Amount::zero()) {
                 pool.PrioritiseTransaction(tx->GetId(), amountdelta);
             }
             TxValidationState state;
             if (nTime + nExpiryTimeout > nNow) {
                 LOCK(cs_main);
                 AcceptToMemoryPoolWithTime(
                     config, pool, state, tx, nTime, false /* bypass_limits */,
                     Amount::zero() /* nAbsurdFee */, false /* test_accept */);
                 if (state.IsValid()) {
                     ++count;
                 } else {
                     // mempool may contain the transaction already, e.g. from
                     // wallet(s) having loaded it while we were processing
                     // mempool transactions; consider these as valid, instead of
                     // failed, but mark them as 'already there'
                     if (pool.exists(tx->GetId())) {
                         ++already_there;
                     } else {
                         ++failed;
                     }
                 }
             } else {
                 ++expired;
             }
 
             if (ShutdownRequested()) {
                 return false;
             }
         }
         std::map<TxId, Amount> mapDeltas;
         file >> mapDeltas;
 
         for (const auto &i : mapDeltas) {
             pool.PrioritiseTransaction(i.first, i.second);
         }
     } catch (const std::exception &e) {
         LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing "
                   "anyway.\n",
                   e.what());
         return false;
     }
 
     LogPrintf("Imported mempool transactions from disk: %i succeeded, %i "
               "failed, %i expired, %i already there\n",
               count, failed, expired, already_there);
     return true;
 }
 
 bool DumpMempool(const CTxMemPool &pool) {
     int64_t start = GetTimeMicros();
 
     std::map<uint256, Amount> mapDeltas;
     std::vector<TxMempoolInfo> vinfo;
 
     static Mutex dump_mutex;
     LOCK(dump_mutex);
 
     {
         LOCK(pool.cs);
         for (const auto &i : pool.mapDeltas) {
             mapDeltas[i.first] = i.second;
         }
 
         vinfo = pool.infoAll();
     }
 
     int64_t mid = GetTimeMicros();
 
     try {
         FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
         if (!filestr) {
             return false;
         }
 
         CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
 
         uint64_t version = MEMPOOL_DUMP_VERSION;
         file << version;
 
         file << uint64_t(vinfo.size());
         for (const auto &i : vinfo) {
             file << *(i.tx);
             file << int64_t(count_seconds(i.m_time));
             file << i.nFeeDelta;
             mapDeltas.erase(i.tx->GetId());
         }
 
         file << mapDeltas;
         if (!FileCommit(file.Get())) {
             throw std::runtime_error("FileCommit failed");
         }
         file.fclose();
         RenameOver(GetDataDir() / "mempool.dat.new",
                    GetDataDir() / "mempool.dat");
         int64_t last = GetTimeMicros();
         LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n",
                   (mid - start) * MICRO, (last - mid) * MICRO);
     } catch (const std::exception &e) {
         LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
         return false;
     }
     return true;
 }
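 
 // On-disk layout of mempool.dat, as implied by LoadMempool()/DumpMempool()
 // above (descriptive summary only):
 //   uint64_t version              // MEMPOOL_DUMP_VERSION, currently 1
 //   uint64_t num                  // number of transaction entries
 //   repeated num times:
 //     transaction                 // the serialized transaction
 //     int64_t nTime               // entry time, in seconds
 //     int64_t nFeeDelta           // fee delta in satoshis (PrioritiseTransaction)
 //   map of txid -> fee delta      // deltas for transactions not dumped above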
 
 bool IsBlockPruned(const CBlockIndex *pblockindex) {
     return (fHavePruned && !pblockindex->nStatus.hasData() &&
             pblockindex->nTx > 0);
 }
 
 //! Guess how far we are in the verification process at the given block index.
 //! Requires cs_main if pindex has not been validated yet (because the chain's
 //! transaction count might be unset). This conditional lock requirement might
 //! be confusing, see: https://github.com/bitcoin/bitcoin/issues/15994
 double GuessVerificationProgress(const ChainTxData &data,
                                  const CBlockIndex *pindex) {
     if (pindex == nullptr) {
         return 0.0;
     }
 
     int64_t nNow = time(nullptr);
 
     double fTxTotal;
     if (pindex->GetChainTxCount() <= data.nTxCount) {
         fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
     } else {
         fTxTotal = pindex->GetChainTxCount() +
                    (nNow - pindex->GetBlockTime()) * data.dTxRate;
     }
 
     return pindex->GetChainTxCount() / fTxTotal;
 }
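 
 // Worked example of the estimate above (illustrative numbers only): with
 // ChainTxData{nTxCount = 300e6, dTxRate = 3.5 tx/s} sampled one hour before
 // nNow, and a tip whose chain contains 250e6 transactions (first branch):
 //   fTxTotal = 300e6 + 3600 * 3.5 = 300,012,600
 //   progress = 250e6 / 300,012,600 ≈ 0.833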
 
 class CMainCleanup {
 public:
     CMainCleanup() {}
     ~CMainCleanup() {
         // block headers
         for (const std::pair<const BlockHash, CBlockIndex *> &it :
              g_blockman.m_block_index) {
             delete it.second;
         }
         g_blockman.m_block_index.clear();
     }
 };
 static CMainCleanup instance_of_cmaincleanup;
diff --git a/src/validation.h b/src/validation.h
index 6a7b18284..3f1c85dd4 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,1059 +1,1039 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2019 The Bitcoin Core developers
 // Copyright (c) 2017-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_VALIDATION_H
 #define BITCOIN_VALIDATION_H
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <amount.h>
 #include <blockfileinfo.h>
 #include <blockindexworkcomparator.h>
 #include <coins.h>
 #include <consensus/consensus.h>
 #include <disconnectresult.h>
 #include <flatfile.h>
 #include <fs.h>
 #include <protocol.h> // For CMessageHeader::MessageMagic
 #include <script/script_error.h>
 #include <script/script_metrics.h>
 #include <sync.h>
 #include <txmempool.h> // For CTxMemPool::cs
 #include <versionbits.h>
 
 #include <algorithm>
 #include <atomic>
 #include <cstdint>
 #include <map>
 #include <memory>
 #include <set>
 #include <utility>
 #include <vector>
 
 class BlockValidationState;
 class CBlockIndex;
 class CBlockTreeDB;
 class CBlockUndo;
 class CChainParams;
 class CChain;
 class CCoinsViewDB;
 class CConnman;
 class CInv;
 class Config;
 class CScriptCheck;
 class CTxMemPool;
 class CTxUndo;
 class DisconnectedBlockTransactions;
 class TxValidationState;
 
 struct ChainTxData;
 struct FlatFilePos;
 struct PrecomputedTransactionData;
 struct LockPoints;
 
 namespace Consensus {
 struct Params;
 }
 
 #define MIN_TRANSACTION_SIZE                                                   \
     (::GetSerializeSize(CTransaction(), PROTOCOL_VERSION))
 
 /** Default for -minrelaytxfee, minimum relay fee for transactions */
 static const Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 * SATOSHI);
 /** Default for -excessutxocharge for transactions */
 static const Amount DEFAULT_UTXO_FEE = Amount::zero();
 /**
  * Default for -mempoolexpiry, expiration time for mempool transactions in
  * hours.
  */
 static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
 /** The maximum size of a blk?????.dat file (since 0.8) */
 static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
-/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
-static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
-/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
-static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
 
 /** Maximum number of dedicated script-checking threads allowed */
 static const int MAX_SCRIPTCHECK_THREADS = 15;
 /** -par default (number of script-checking threads, 0 = auto) */
 static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
 /**
  * Number of blocks that can be requested at any given time from a single peer.
  */
 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
 /**
  * Timeout in seconds during which a peer must stall block download progress
  * before being disconnected.
  */
 static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
 /**
  * Number of headers sent in one getheaders result. We rely on the assumption
  * that if a peer sends fewer than this number, we have reached its tip. Changing this
  * value is a protocol upgrade.
  */
 static const unsigned int MAX_HEADERS_RESULTS = 2000;
 /**
  * Maximum depth of blocks we're willing to serve as compact blocks to peers
  * when requested. For older blocks, a regular BLOCK response will be sent.
  */
 static const int MAX_CMPCTBLOCK_DEPTH = 5;
 /**
  * Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
  */
 static const int MAX_BLOCKTXN_DEPTH = 10;
 /**
  * Size of the "block download window": how far ahead of our current height do
  * we fetch? Larger windows tolerate larger download speed differences between
  * peers, but increase the potential degree of disordering of blocks on disk
  * (which makes reindexing, and in the future perhaps pruning, harder). We'll
  * probably want to make this a per-peer adaptive value at some point.
  */
 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
 /** Time to wait (in seconds) between writing blocks/block index to disk. */
 static const unsigned int DATABASE_WRITE_INTERVAL = 60 * 60;
 /** Time to wait (in seconds) between flushing chainstate to disk. */
 static const unsigned int DATABASE_FLUSH_INTERVAL = 24 * 60 * 60;
 /** Maximum length of reject messages. */
 static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
 /** Block download timeout base, expressed in millionths of the block interval
  * (i.e. 10 min) */
 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
 /**
  * Additional block download timeout per parallel downloading peer (i.e. 5 min)
  */
 static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
 
 static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;
 /**
  * Maximum age of our tip in seconds for us to be considered current for fee
  * estimation.
  */
 static const int64_t MAX_FEE_ESTIMATION_TIP_AGE = 3 * 60 * 60;
 
 static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
 static const bool DEFAULT_TXINDEX = false;
 static const char *const DEFAULT_BLOCKFILTERINDEX = "0";
 static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;
 
 /** Default for -persistmempool */
 static const bool DEFAULT_PERSIST_MEMPOOL = true;
 /** Default for using fee filter */
 static const bool DEFAULT_FEEFILTER = true;
 
 /**
  * Maximum number of headers to announce when relaying blocks with headers
  * message.
  */
 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
 
 /** Maximum number of unconnecting headers announcements before DoS score */
 static const int MAX_UNCONNECTING_HEADERS = 10;
 
 static const bool DEFAULT_PEERBLOOMFILTERS = true;
 
 /** Default for -stopatheight */
 static const int DEFAULT_STOPATHEIGHT = 0;
 /** Default for -maxreorgdepth */
 static const int DEFAULT_MAX_REORG_DEPTH = 10;
 /**
  * Default for -finalizationdelay
  * This is the minimum time between a block header reception and the block
  * finalization.
  * This value should be >> block propagation and validation time
  */
 static const int64_t DEFAULT_MIN_FINALIZATION_DELAY = 2 * 60 * 60;
 
 extern CScript COINBASE_FLAGS;
 extern RecursiveMutex cs_main;
 extern CTxMemPool g_mempool;
 typedef std::unordered_map<BlockHash, CBlockIndex *, BlockHasher> BlockMap;
 extern Mutex g_best_block_mutex;
 extern std::condition_variable g_best_block_cv;
 extern uint256 g_best_block;
 extern std::atomic_bool fImporting;
 extern std::atomic_bool fReindex;
 extern bool fRequireStandard;
 extern bool fCheckBlockIndex;
 extern bool fCheckpointsEnabled;
 extern size_t nCoinCacheUsage;
 
 /**
  * A fee rate smaller than this is considered zero fee (for relaying, mining and
  * transaction creation)
  */
 extern CFeeRate minRelayTxFee;
 /**
  * If the tip is older than this (in seconds), the node is considered to be in
  * initial block download.
  */
 extern int64_t nMaxTipAge;
 
 /**
  * Block hash whose ancestors we will assume to have valid scripts without
  * checking them.
  */
 extern BlockHash hashAssumeValid;
 
 /**
  * Minimum work we will assume exists on some valid chain.
  */
 extern arith_uint256 nMinimumChainWork;
 
 /**
  * Best header we've seen so far (used for getheaders queries' starting points).
  */
 extern CBlockIndex *pindexBestHeader;
 
 /** Pruning-related variables and constants */
 /** True if any block files have ever been pruned. */
 extern bool fHavePruned;
 /** True if we're running in -prune mode. */
 extern bool fPruneMode;
 /** Number of MiB of block files that we're trying to stay below. */
 extern uint64_t nPruneTarget;
 /**
  * Block files containing a block-height within MIN_BLOCKS_TO_KEEP of
  * ::ChainActive().Tip() will not be pruned.
  */
 static const unsigned int MIN_BLOCKS_TO_KEEP = 288;
 /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
 
 static const signed int DEFAULT_CHECKBLOCKS = 6;
 static const unsigned int DEFAULT_CHECKLEVEL = 3;
 
 /**
  * Require that user allocate at least 550 MiB for block & undo files
  * (blk???.dat and rev???.dat)
  * At 1MB per block, 288 blocks = 288MB.
  * Add 15% for Undo data = 331MB
  * Add 20% for Orphan block rate = 397MB
  * We want the low water mark after pruning to be at least 397 MB and since we
  * prune in full block file chunks, we need the high water mark which triggers
  * the prune to be one 128MB block file + added 15% undo data = 147MB greater
  * for a total of 545MB
  * Setting the target to >= 550 MiB will make it likely we can respect the
  * target.
  */
 static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
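 
 // Worked arithmetic behind the figure above (rounded):
 //   288 blocks * 1 MB                     = 288 MB
 //   288 MB * 1.15 (undo data)             ≈ 331 MB
 //   331 MB * 1.20 (orphan rate)           ≈ 397 MB
 //   prune trigger headroom: 128 MB * 1.15 ≈ 147 MB
 //   397 MB + 147 MB                       ≈ 545 MB, rounded up to 550 MiB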
 
 class BlockValidationOptions {
 private:
     uint64_t excessiveBlockSize;
     bool checkPoW : 1;
     bool checkMerkleRoot : 1;
 
 public:
     // Do full validation by default
     explicit BlockValidationOptions(const Config &config);
     explicit BlockValidationOptions(uint64_t _excessiveBlockSize,
                                     bool _checkPow = true,
                                     bool _checkMerkleRoot = true)
         : excessiveBlockSize(_excessiveBlockSize), checkPoW(_checkPow),
           checkMerkleRoot(_checkMerkleRoot) {}
 
     BlockValidationOptions withCheckPoW(bool _checkPoW = true) const {
         BlockValidationOptions ret = *this;
         ret.checkPoW = _checkPoW;
         return ret;
     }
 
     BlockValidationOptions
     withCheckMerkleRoot(bool _checkMerkleRoot = true) const {
         BlockValidationOptions ret = *this;
         ret.checkMerkleRoot = _checkMerkleRoot;
         return ret;
     }
 
     bool shouldValidatePoW() const { return checkPoW; }
     bool shouldValidateMerkleRoot() const { return checkMerkleRoot; }
     uint64_t getExcessiveBlockSize() const { return excessiveBlockSize; }
 };
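 
 // Usage sketch (illustrative): the with*() helpers return modified copies, so
 // the options can be chained, e.g. to skip the PoW and merkle-root checks:
 //   BlockValidationOptions(config)
 //       .withCheckPoW(false)
 //       .withCheckMerkleRoot(false);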
 
 /**
  * Process an incoming block. This only returns after the best known valid
  * block is made active. Note that it does not, however, guarantee that the
  * specific block passed to it has been checked for validity!
  *
  * If you want to *possibly* get feedback on whether pblock is valid, you must
  * install a CValidationInterface (see validationinterface.h) - this will have
  * its BlockChecked method called whenever *any* block completes validation.
  *
  * Note that we guarantee that either the proof-of-work is valid on pblock, or
  * (and possibly also) BlockChecked will have been called.
  *
  * May not be called in a validationinterface callback.
  *
  * @param[in]   config  The global config.
  * @param[in]   pblock  The block we want to process.
  * @param[in]   fForceProcessing Process this block even if unrequested; used
  * for non-network block sources and whitelisted peers.
  * @param[out]  fNewBlock A boolean which is set to indicate if the block was
  *                        first received via this call.
  * @return True if the block is accepted as a valid block.
  */
 bool ProcessNewBlock(const Config &config,
                      const std::shared_ptr<const CBlock> pblock,
                      bool fForceProcessing, bool *fNewBlock)
     LOCKS_EXCLUDED(cs_main);
 
 /**
  * Process incoming block headers.
  *
  * May not be called in a validationinterface callback.
  *
  * @param[in]  config        The config.
  * @param[in]  block         The block headers themselves.
  * @param[out] state         This may be set to an Error state if any error
  *                           occurred processing them.
  * @param[out] ppindex       If set, the pointer will be set to point to the
  *                           last new block index object for the given headers.
  * @return True if block headers were accepted as valid.
  */
 bool ProcessNewBlockHeaders(const Config &config,
                             const std::vector<CBlockHeader> &block,
                             BlockValidationState &state,
                             const CBlockIndex **ppindex = nullptr)
     LOCKS_EXCLUDED(cs_main);
 
-/**
- * Open a block file (blk?????.dat).
- */
-FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);
-
-/**
- * Translation to a filesystem path.
- */
-fs::path GetBlockPosFilename(const FlatFilePos &pos);
-
 /**
  * Import blocks from an external file.
  */
 bool LoadExternalBlockFile(const Config &config, FILE *fileIn,
                            FlatFilePos *dbp = nullptr);
 
 /**
  * Ensures we have a genesis block in the block tree, possibly writing one to
  * disk.
  */
 bool LoadGenesisBlock(const CChainParams &chainparams);
 
 /**
  * Load the block tree and coins database from disk, initializing state if we're
  * running with -reindex.
  */
 bool LoadBlockIndex(const Consensus::Params &params)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Update the chain tip based on database information.
  */
 bool LoadChainTip(const Config &config) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Unload database information.
  */
 void UnloadBlockIndex();
 
 /**
  * Run an instance of the script checking thread.
  */
 void ThreadScriptCheck(int worker_num);
 
 /**
  * Retrieve a transaction (from memory pool, or from disk, if possible).
  */
 bool GetTransaction(const TxId &txid, CTransactionRef &txOut,
                     const Consensus::Params &params, BlockHash &hashBlock,
                     const CBlockIndex *const blockIndex = nullptr);
 
 /**
  * Find the best known block, and make it the tip of the block chain
  *
  * May not be called with cs_main held. May not be called in a
  * validationinterface callback.
  */
 bool ActivateBestChain(
     const Config &config, BlockValidationState &state,
     std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>());
 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams);
 
 /**
  * Guess verification progress (as a fraction between 0.0=genesis and
  * 1.0=current tip).
  */
 double GuessVerificationProgress(const ChainTxData &data,
                                  const CBlockIndex *pindex);
 
 /**
  * Calculate the amount of disk space the block & undo files currently use.
  */
 uint64_t CalculateCurrentUsage();
 
 /**
  * Mark one block file as pruned.
  */
 void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Actually unlink the specified files
  */
 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune);
 
 /** Prune block files up to a given height */
 void PruneBlockFilesManual(int nManualPruneHeight);
 
 /**
  * (try to) add transaction to memory pool
  */
 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
                         TxValidationState &state, const CTransactionRef &tx,
                         bool bypass_limits, const Amount nAbsurdFee,
                         bool test_accept = false)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Simple class for regulating resource usage during CheckInputs (and
  * CScriptCheck), atomic so as to be compatible with parallel validation.
  */
 class CheckInputsLimiter {
 protected:
     std::atomic<int64_t> remaining;
 
 public:
     explicit CheckInputsLimiter(int64_t limit) : remaining(limit) {}
 
     bool consume_and_check(int consumed) {
         auto newvalue = (remaining -= consumed);
         return newvalue >= 0;
     }
 
     bool check() { return remaining >= 0; }
 };
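 
 // Usage sketch: a limiter constructed with a budget of 100 sigchecks.
 //   CheckInputsLimiter limiter(100);
 //   limiter.consume_and_check(60); // true,  40 remaining
 //   limiter.consume_and_check(60); // false, budget exceeded (remaining == -20)
 //   limiter.check();               // false from here on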
 
 class TxSigCheckLimiter : public CheckInputsLimiter {
 public:
     TxSigCheckLimiter() : CheckInputsLimiter(MAX_TX_SIGCHECKS) {}
 
     // Let's make this bad boy copiable.
     TxSigCheckLimiter(const TxSigCheckLimiter &rhs)
         : CheckInputsLimiter(rhs.remaining.load()) {}
 
     TxSigCheckLimiter &operator=(const TxSigCheckLimiter &rhs) {
         remaining = rhs.remaining.load();
         return *this;
     }
 
     static TxSigCheckLimiter getDisabled() {
         TxSigCheckLimiter txLimiter;
         // Historically, there has not been a transaction with more than 20k sig
          // checks on testnet or mainnet, so this effectively disables sigchecks.
         txLimiter.remaining = 20000;
         return txLimiter;
     }
 };
 
 class ConnectTrace;
 
 /**
  * Check whether all inputs of this transaction are valid (no double spends,
  * scripts & sigs, amounts). This does not modify the UTXO set.
  *
  * If pvChecks is not nullptr, script checks are pushed onto it instead of being
  * performed inline. Any script checks which are not necessary (e.g. due to script
  * execution cache hits) are, obviously, not pushed onto pvChecks/run.
  *
  * Upon success nSigChecksOut will be filled in with either:
  * - correct total for all inputs, or,
  * - 0, in the case when checks were pushed onto pvChecks (i.e., a cache miss
  * with pvChecks non-null), in which case the total can be found by executing
  * pvChecks and adding the results.
  *
  * Setting sigCacheStore/scriptCacheStore to false will remove elements from the
  * corresponding cache which are matched. This is useful for checking blocks
  * where we will likely never need the cache entry again.
  *
  * pLimitSigChecks can be passed to limit the sigchecks count either in parallel
  * or serial validation. With pvChecks null (serial validation), breaking the
  * pLimitSigChecks limit will abort evaluation early and return false. With
  * pvChecks not-null (parallel validation): the cached nSigChecks may itself
  * break the limit in which case false is returned, OR, each entry in the
  * returned pvChecks must be executed exactly once in order to probe the limit
  * accurately.
  */
 bool CheckInputs(const CTransaction &tx, TxValidationState &state,
                  const CCoinsViewCache &view, const uint32_t flags,
                  bool sigCacheStore, bool scriptCacheStore,
                  const PrecomputedTransactionData &txdata, int &nSigChecksOut,
                  TxSigCheckLimiter &txLimitSigChecks,
                  CheckInputsLimiter *pBlockLimitSigChecks,
                  std::vector<CScriptCheck> *pvChecks)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Handy shortcut to full fledged CheckInputs call.
  */
 static inline bool
 CheckInputs(const CTransaction &tx, TxValidationState &state,
             const CCoinsViewCache &view, const uint32_t flags,
             bool sigCacheStore, bool scriptCacheStore,
             const PrecomputedTransactionData &txdata, int &nSigChecksOut)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     TxSigCheckLimiter nSigChecksTxLimiter;
     return CheckInputs(tx, state, view, flags, sigCacheStore, scriptCacheStore,
                        txdata, nSigChecksOut, nSigChecksTxLimiter, nullptr,
                        nullptr);
 }
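 
 // Calling patterns implied by the contract above (sketch only):
 //  - Serial validation: pass pvChecks == nullptr; on success nSigChecksOut is
 //    the correct total for all inputs.
 //  - Parallel validation: pass a vector for pvChecks; on a script cache miss
 //    nSigChecksOut is 0 and the real total is obtained by executing every
 //    queued CScriptCheck and adding up the results.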
 
 /** Get the BIP9 state for a given deployment at the current tip. */
 ThresholdState VersionBitsTipState(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /** Get the BIP9 state for a given deployment at a given block. */
 ThresholdState VersionBitsBlockState(const Consensus::Params &params,
                                      Consensus::DeploymentPos pos,
                                      const CBlockIndex *pindex);
 
 /**
  * Get the numerical statistics for the BIP9 state for a given deployment at the
  * current tip.
  */
 BIP9Stats VersionBitsTipStatistics(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /**
  * Get the block height at which the BIP9 deployment switched into the state for
  * the block building on the current tip.
  */
 int VersionBitsTipStateSinceHeight(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos);
 
 /** Apply the effects of this transaction on the UTXO set represented by view */
 void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs, int nHeight);
 
 /**
  * Mark all the coins corresponding to a given transaction inputs as spent.
  */
 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight);
 
 /**
  * Apply the effects of this transaction on the UTXO set represented by view.
  */
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight);
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                  int nHeight);
 
 /**
  * Test whether the LockPoints height and time are still valid on the current
  * chain.
  */
 bool TestLockPointValidity(const LockPoints *lp)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Check if transaction will be BIP 68 final in the next block to be created.
  *
  * Simulates calling SequenceLocks() with data from the tip of the current
  * active chain. Optionally stores in LockPoints the resulting height and time
  * calculated and the hash of the block needed for calculation or skips the
  * calculation and uses the LockPoints passed in for evaluation. The LockPoints
  * should not be considered valid if CheckSequenceLocks returns false.
  *
  * See consensus/consensus.h for flag definitions.
  */
 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
                         int flags, LockPoints *lp = nullptr,
                         bool useExistingLockPoints = false)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Closure representing one script verification.
  * Note that this stores references to the spending transaction.
  *
  * Note that if pLimitSigChecks is passed, then failure does not imply that
  * scripts have failed.
  */
 class CScriptCheck {
 private:
     CTxOut m_tx_out;
     const CTransaction *ptxTo;
     unsigned int nIn;
     uint32_t nFlags;
     bool cacheStore;
     ScriptError error;
     ScriptExecutionMetrics metrics;
     PrecomputedTransactionData txdata;
     TxSigCheckLimiter *pTxLimitSigChecks;
     CheckInputsLimiter *pBlockLimitSigChecks;
 
 public:
     CScriptCheck()
         : ptxTo(nullptr), nIn(0), nFlags(0), cacheStore(false),
           error(ScriptError::UNKNOWN), txdata(), pTxLimitSigChecks(nullptr),
           pBlockLimitSigChecks(nullptr) {}
 
     CScriptCheck(const CTxOut &outIn, const CTransaction &txToIn,
                  unsigned int nInIn, uint32_t nFlagsIn, bool cacheIn,
                  const PrecomputedTransactionData &txdataIn,
                  TxSigCheckLimiter *pTxLimitSigChecksIn = nullptr,
                  CheckInputsLimiter *pBlockLimitSigChecksIn = nullptr)
         : m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn),
           cacheStore(cacheIn), error(ScriptError::UNKNOWN), txdata(txdataIn),
           pTxLimitSigChecks(pTxLimitSigChecksIn),
           pBlockLimitSigChecks(pBlockLimitSigChecksIn) {}
 
     bool operator()();
 
     void swap(CScriptCheck &check) {
         std::swap(ptxTo, check.ptxTo);
         std::swap(m_tx_out, check.m_tx_out);
         std::swap(nIn, check.nIn);
         std::swap(nFlags, check.nFlags);
         std::swap(cacheStore, check.cacheStore);
         std::swap(error, check.error);
         std::swap(metrics, check.metrics);
         std::swap(txdata, check.txdata);
         std::swap(pTxLimitSigChecks, check.pTxLimitSigChecks);
         std::swap(pBlockLimitSigChecks, check.pBlockLimitSigChecks);
     }
 
     ScriptError GetScriptError() const { return error; }
 
     ScriptExecutionMetrics GetScriptExecutionMetrics() const { return metrics; }
 };
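 
 // Usage sketch (illustrative; spentOutput is a hypothetical CTxOut): each
 // CScriptCheck closes over a single input, so it can be run inline or queued
 // for a script-checking worker thread.
 //   CScriptCheck check(spentOutput, tx, i, flags, sigCacheStore, txdata);
 //   if (!check()) {
 //       ScriptError err = check.GetScriptError();
 //   }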
 
-/** Functions for disk access for blocks */
-bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos,
-                       const Consensus::Params &params);
-bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
-                       const Consensus::Params &params);
-
 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex);
 
 /** Functions for validating blocks and updating the block tree */
 
 /**
  * Context-independent validity checks.
  *
  * Returns true if the provided block is valid (has valid header,
  * transactions are valid, block is a valid size, etc.)
  */
 bool CheckBlock(const CBlock &block, BlockValidationState &state,
                 const Consensus::Params &params,
                 BlockValidationOptions validationOptions);
 
 /**
  * This is a variant of ContextualCheckTransaction which computes the contextual
  * check for a transaction based on the chain tip.
  *
  * See consensus/consensus.h for flag definitions.
  */
 bool ContextualCheckTransactionForCurrentBlock(const Consensus::Params &params,
                                                const CTransaction &tx,
                                                TxValidationState &state,
                                                int flags = -1);
 
 /**
  * Check a block is completely valid from start to finish (only works on top of
  * our current best block)
  */
 bool TestBlockValidity(BlockValidationState &state, const CChainParams &params,
                        const CBlock &block, CBlockIndex *pindexPrev,
                        BlockValidationOptions validationOptions)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * RAII wrapper for VerifyDB: Verify consistency of the block and coin
  * databases.
  */
 class CVerifyDB {
 public:
     CVerifyDB();
     ~CVerifyDB();
     bool VerifyDB(const Config &config, CCoinsView *coinsview, int nCheckLevel,
                   int nCheckDepth);
 };
 
 /** Replay blocks that aren't fully applied to the database. */
 bool ReplayBlocks(const Consensus::Params &params, CCoinsView *view);
 
 CBlockIndex *LookupBlockIndex(const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Find the last common block between the parameter chain and a locator. */
 CBlockIndex *FindForkInGlobalIndex(const CChain &chain,
                                    const CBlockLocator &locator)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** @see CChainState::FlushStateToDisk */
 enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };
 
 /**
  * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
  * to determine where the most-work tip is.
  *
  * This data is used mostly in `CChainState` - information about, e.g.,
  * candidate tips is not maintained here.
  */
 class BlockManager {
 public:
     BlockMap m_block_index GUARDED_BY(cs_main);
 
     /**
      * In order to efficiently track invalidity of headers, we keep the set of
      * blocks which we tried to connect and found to be invalid here (ie which
      * were set to BLOCK_FAILED_VALID since the last restart). We can then
      * walk this set and check if a new header is a descendant of something in
      * this set, preventing us from having to walk m_block_index when we try
      * to connect a bad block and fail.
      *
      * While this is more complicated than marking everything which descends
      * from an invalid block as invalid at the time we discover it to be
      * invalid, doing so would require walking all of m_block_index to find all
      * descendants. Since this case should be very rare, keeping track of all
      * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
      * well.
      *
      * Because we already walk m_block_index in height-order at startup, we go
      * ahead and mark descendants of invalid blocks as FAILED_CHILD at that
      * time, instead of putting things in this set.
      */
     std::set<CBlockIndex *> m_failed_blocks;
 
     /**
      * All pairs A->B, where A (or one of its ancestors) misses transactions,
      * but B has transactions. Pruned nodes may have entries where B is missing
      * data.
      */
     std::multimap<CBlockIndex *, CBlockIndex *> m_blocks_unlinked;
 
     /**
      * Load the blocktree off disk and into memory. Populate certain metadata
      * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as
      * peripheral collections like setDirtyBlockIndex.
      *
      * @param[out] block_index_candidates  Fill this set with any valid blocks
      * for which we've downloaded all transactions.
      */
     bool LoadBlockIndex(const Consensus::Params &consensus_params,
                         CBlockTreeDB &blocktree,
                         std::set<CBlockIndex *, CBlockIndexWorkComparator>
                             &block_index_candidates)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Clear all data members. */
     void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     CBlockIndex *AddToBlockIndex(const CBlockHeader &block)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Create a new block index entry for a given block hash */
     CBlockIndex *InsertBlockIndex(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * If a block header hasn't already been seen, call CheckBlockHeader on it,
      * ensure that it doesn't descend from an invalid block, and then add it to
      * m_block_index.
      */
     bool AcceptBlockHeader(const Config &config, const CBlockHeader &block,
                            BlockValidationState &state, CBlockIndex **ppindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 };
 
 /**
  * CChainState stores and provides an API to update our local knowledge of the
  * current best chain.
  *
  * Eventually, the API here is targeted at being exposed externally as a
  * consumable libconsensus library, so any functions added must only call
  * other class member functions, pure functions in other parts of the consensus
  * library, callbacks via the validation interface, or read/write-to-disk
  * functions (eventually this will also be via callbacks).
  *
  * Anything that is contingent on the current tip of the chain is stored here,
  * whereas block information and metadata independent of the current tip is
  * kept in `BlockManager`.
  */
 class CChainState {
 private:
     /**
      * the ChainState CriticalSection
      * A lock that must be held when modifying this ChainState - held in
      * ActivateBestChain()
      */
     RecursiveMutex m_cs_chainstate;
 
     /**
      * Every received block is assigned a unique and increasing identifier, so
      * we know which one to give priority in case of a fork.
      * Blocks loaded from disk are assigned id 0, so start the counter at 1.
      */
     std::atomic<int32_t> nBlockSequenceId{1};
     /** Decreasing counter (used by subsequent preciousblock calls). */
     int32_t nBlockReverseSequenceId = -1;
     /** chainwork for the last block that preciousblock has been applied to. */
     arith_uint256 nLastPreciousChainwork = 0;
 
     /**
      * Whether this chainstate is undergoing initial block download.
      *
      * Mutable because we need to be able to mark IsInitialBlockDownload()
      * const, which latches this for caching purposes.
      */
     mutable std::atomic<bool> m_cached_finished_ibd{false};
 
     //! Reference to a BlockManager instance which itself is shared across all
     //! CChainState instances. Keeping a local reference allows us to test more
     //! easily as opposed to referencing a global.
     BlockManager &m_blockman;
 
     /**
      * The best finalized block.
      * This block cannot be reorged in any way except by explicit user action.
      */
     const CBlockIndex *m_finalizedBlockIndex GUARDED_BY(cs_main) = nullptr;
 
 public:
     explicit CChainState(BlockManager &blockman) : m_blockman(blockman) {}
 
     //! The current chain of blockheaders we consult and build on.
     //! @see CChain, CBlockIndex.
     CChain m_chain;
     /**
      * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for
      * itself and all ancestors) and as good as our current tip or better.
      * Entries may be failed, though, and pruning nodes may be missing the data
      * for the block.
      */
     std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
 
     /**
      * Update the on-disk chain state.
      * Depending on the mode we're called with, the caches and indexes are
      * flushed if they're too large, if it's been a while since the last
      * write, or always and in all cases if we're in prune mode and are
      * deleting files.
      *
      * If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do
      * anything besides checking if we need to prune.
      */
     bool FlushStateToDisk(const CChainParams &chainparams,
                           BlockValidationState &state, FlushStateMode mode,
                           int nManualPruneHeight = 0);
 
     //! Unconditionally flush all changes to disk.
     void ForceFlushStateToDisk();
 
     //! Prune blockfiles from the disk if necessary and then flush chainstate
     //! changes if we pruned.
     void PruneAndFlush();
 
     bool ActivateBestChain(
         const Config &config, BlockValidationState &state,
         std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>())
         LOCKS_EXCLUDED(cs_main);
 
     bool AcceptBlock(const Config &config,
                      const std::shared_ptr<const CBlock> &pblock,
                      BlockValidationState &state, bool fRequested,
                      const FlatFilePos *dbp, bool *fNewBlock)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Block (dis)connection on a given view:
     DisconnectResult DisconnectBlock(const CBlock &block,
                                      const CBlockIndex *pindex,
                                      CCoinsViewCache &view);
     bool ConnectBlock(const CBlock &block, BlockValidationState &state,
                       CBlockIndex *pindex, CCoinsViewCache &view,
                       const CChainParams &params,
                       BlockValidationOptions options, bool fJustCheck = false)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Block disconnection on our pcoinsTip:
     bool DisconnectTip(const CChainParams &params, BlockValidationState &state,
                        DisconnectedBlockTransactions *disconnectpool)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
 
     // Manual block validity manipulation:
     bool PreciousBlock(const Config &config, BlockValidationState &state,
                        CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main);
     /** Mark a block as invalid. */
     bool InvalidateBlock(const Config &config, BlockValidationState &state,
                          CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
     /** Park a block. */
     bool ParkBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
 
     /**
      * Finalize a block.
      * A finalized block cannot be reorged in any way.
      */
     bool FinalizeBlock(const Config &config, BlockValidationState &state,
                        CBlockIndex *pindex)
         LOCKS_EXCLUDED(cs_main, m_cs_chainstate);
     /** Return the currently finalized block index. */
     const CBlockIndex *GetFinalizedBlock() const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /**
      * Checks if a block is finalized.
      */
     bool IsBlockFinalized(const CBlockIndex *pindex) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     void ResetBlockFailureFlags(CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F>
     bool UpdateFlagsForBlock(CBlockIndex *pindexBase, CBlockIndex *pindex, F f)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F, typename C, typename AC>
     void UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset, F f,
                      C fChild, AC fAncestorWasChanged)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Remove parked status from a block and its descendants. */
     void UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool ReplayBlocks(const Consensus::Params &params, CCoinsView *view);
     bool LoadGenesisBlock(const CChainParams &chainparams);
 
     void PruneBlockIndexCandidates();
 
     void UnloadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Check whether we are doing an initial block download (synchronizing from
      * disk or network)
      */
     bool IsInitialBlockDownload() const;
 
     /**
      * Make various assertions about the state of the block index.
      *
      * By default this only executes fully when using the Regtest chain; see:
      * fCheckBlockIndex.
      */
     void CheckBlockIndex(const Consensus::Params &consensusParams);
 
 private:
     bool ActivateBestChainStep(const Config &config,
                                BlockValidationState &state,
                                CBlockIndex *pindexMostWork,
                                const std::shared_ptr<const CBlock> &pblock,
                                bool &fInvalidFound, ConnectTrace &connectTrace)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
     bool ConnectTip(const Config &config, BlockValidationState &state,
                     CBlockIndex *pindexNew,
                     const std::shared_ptr<const CBlock> &pblock,
                     ConnectTrace &connectTrace,
                     DisconnectedBlockTransactions &disconnectpool)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::g_mempool.cs);
     void InvalidBlockFound(CBlockIndex *pindex,
                            const BlockValidationState &state)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void InvalidChainFound(CBlockIndex *pindexNew)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     CBlockIndex *FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool MarkBlockAsFinal(const Config &config, BlockValidationState &state,
                           const CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void ReceivedBlockTransactions(const CBlock &block, CBlockIndex *pindexNew,
                                    const FlatFilePos &pos)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &inputs,
                           const Consensus::Params &params)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool UnwindBlock(const Config &config, BlockValidationState &state,
                      CBlockIndex *pindex, bool invalidate)
         EXCLUSIVE_LOCKS_REQUIRED(m_cs_chainstate);
 };
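 
 // Typical high-level flow (sketch, following the lock annotations above): a
 // new block is first handed to AcceptBlock() under cs_main, which stores it
 // and updates the block index; cs_main is then released and ActivateBestChain()
 // connects whichever chain now has the most work.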
 
 /**
  * Mark a block as precious and reorganize.
  *
  * May not be called in a validationinterface callback.
  */
 bool PreciousBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main);
 
 /** Remove invalidity status from a block and its descendants. */
 void ResetBlockFailureFlags(CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Remove parked status from a block and its descendants. */
 void UnparkBlockAndChildren(CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** Remove parked status from a block. */
 void UnparkBlock(CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /** @returns the most-work valid chainstate. */
 CChainState &ChainstateActive();
 
 /** @returns the most-work chain. */
 CChain &ChainActive();
 
 /** @returns the global block index map. */
 BlockMap &BlockIndex();
 
 /**
  * Global variable that points to the coins database (protected by cs_main)
  */
 extern std::unique_ptr<CCoinsViewDB> pcoinsdbview;
 
 /**
  * Global variable that points to the active CCoinsView (protected by cs_main)
  */
 extern std::unique_ptr<CCoinsViewCache> pcoinsTip;
 
 /**
  * Global variable that points to the active block tree (protected by cs_main)
  */
 extern std::unique_ptr<CBlockTreeDB> pblocktree;
 
 /**
  * Return the spend height, which is one more than the height of inputs.GetBestBlock().
  * While checking, GetBestBlock() refers to the parent block. (protected by
  * cs_main)
  * This is also true for mempool checks.
  */
 int GetSpendHeight(const CCoinsViewCache &inputs);
 
 /**
  * Determine what nVersion a new block should use.
  */
 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
                             const Consensus::Params &params);
 
 /**
  * Reject codes greater or equal to this can be returned by AcceptToMemPool or
  * AcceptBlock for blocks/transactions, to signal internal conditions. They
  * cannot and should not be sent over the P2P network.
  */
 static const unsigned int REJECT_INTERNAL = 0x100;
 /** Too high fee. Cannot be triggered by P2P transactions */
 static const unsigned int REJECT_HIGHFEE = 0x100;
 /** Block conflicts with the finalized chain */
 static const unsigned int REJECT_AGAINST_FINALIZED = 0x103;
 
 /** Get block file info entry for one block file */
 CBlockFileInfo *GetBlockFileInfo(size_t n);
 
 /** Dump the mempool to disk. */
 bool DumpMempool(const CTxMemPool &pool);
 
 /** Load the mempool from disk. */
 bool LoadMempool(const Config &config, CTxMemPool &pool);
 
 //! Check whether the block associated with this index entry is pruned or not.
 bool IsBlockPruned(const CBlockIndex *pblockindex);
 
 #endif // BITCOIN_VALIDATION_H
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 787648c3c..e0183be57 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -1,4885 +1,4884 @@
 // Copyright (c) 2010 Satoshi Nakamoto
 // Copyright (c) 2009-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <amount.h>
 #include <chainparams.h> // for GetConsensus.
 #include <coins.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <core_io.h>
 #include <interfaces/chain.h>
 #include <key_io.h>
 #include <node/context.h>
 #include <outputtype.h>
 #include <policy/fees.h>
 #include <rpc/rawtransaction_util.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <script/descriptor.h>
 #include <util/bip32.h>
 #include <util/error.h>
 #include <util/moneystr.h>
 #include <util/string.h>
 #include <util/system.h>
 #include <util/translation.h>
 #include <util/url.h>
 #include <util/validation.h>
-#include <validation.h>
 #include <wallet/coincontrol.h>
 #include <wallet/psbtwallet.h>
 #include <wallet/rpcwallet.h>
 #include <wallet/wallet.h>
 #include <wallet/walletdb.h>
 #include <wallet/walletutil.h>
 
 #include <univalue.h>
 
 #include <event2/http.h>
 
 using interfaces::FoundBlock;
 
 static const std::string WALLET_ENDPOINT_BASE = "/wallet/";
 
 static inline bool GetAvoidReuseFlag(CWallet *const pwallet,
                                      const UniValue &param) {
     bool can_avoid_reuse = pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
     bool avoid_reuse = param.isNull() ? can_avoid_reuse : param.get_bool();
 
     if (avoid_reuse && !can_avoid_reuse) {
         throw JSONRPCError(
             RPC_WALLET_ERROR,
             "wallet does not have the \"avoid reuse\" feature enabled");
     }
 
     return avoid_reuse;
 }
 
 /**
  * Used by RPC commands that have an include_watchonly parameter. We default to
  * true for watchonly wallets if include_watchonly isn't explicitly set.
  */
 static bool ParseIncludeWatchonly(const UniValue &include_watchonly,
                                   const CWallet &pwallet) {
     if (include_watchonly.isNull()) {
         // if include_watchonly isn't explicitly set, then check if we have a
         // watchonly wallet
         return pwallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
     }
 
     // otherwise return whatever include_watchonly was set to
     return include_watchonly.get_bool();
 }
 
 /**
  * Checks if a CKey is in the given CWallet, compressed or otherwise.
  */
 bool HaveKey(const SigningProvider &wallet, const CKey &key) {
     CKey key2;
     key2.Set(key.begin(), key.end(), !key.IsCompressed());
     return wallet.HaveKey(key.GetPubKey().GetID()) ||
            wallet.HaveKey(key2.GetPubKey().GetID());
 }
 
 bool GetWalletNameFromJSONRPCRequest(const JSONRPCRequest &request,
                                      std::string &wallet_name) {
     if (request.URI.substr(0, WALLET_ENDPOINT_BASE.size()) ==
         WALLET_ENDPOINT_BASE) {
         // wallet endpoint was used
         wallet_name =
             urlDecode(request.URI.substr(WALLET_ENDPOINT_BASE.size()));
         return true;
     }
     return false;
 }
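 
 // Example (illustrative, assuming urlDecode performs standard
 // percent-decoding): a request URI of "/wallet/my%20wallet" matches the
 // endpoint prefix, so wallet_name becomes "my wallet" and the function
 // returns true; any other URI leaves wallet_name untouched and returns false.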
 
 std::shared_ptr<CWallet>
 GetWalletForJSONRPCRequest(const JSONRPCRequest &request) {
     std::string wallet_name;
     if (GetWalletNameFromJSONRPCRequest(request, wallet_name)) {
         std::shared_ptr<CWallet> pwallet = GetWallet(wallet_name);
         if (!pwallet) {
             throw JSONRPCError(
                 RPC_WALLET_NOT_FOUND,
                 "Requested wallet does not exist or is not loaded");
         }
         return pwallet;
     }
 
     std::vector<std::shared_ptr<CWallet>> wallets = GetWallets();
     return wallets.size() == 1 || (request.fHelp && wallets.size() > 0)
                ? wallets[0]
                : nullptr;
 }
 
 std::string HelpRequiringPassphrase(const CWallet *pwallet) {
     return pwallet && pwallet->IsCrypted()
                ? "\nRequires wallet passphrase to be set with walletpassphrase "
                  "call."
                : "";
 }
 
 bool EnsureWalletIsAvailable(const CWallet *pwallet, bool avoidException) {
     if (pwallet) {
         return true;
     }
     if (avoidException) {
         return false;
     }
     if (!HasWallets()) {
         throw JSONRPCError(RPC_METHOD_NOT_FOUND,
                            "Method not found (wallet method is disabled "
                            "because no wallet is loaded)");
     }
 
     throw JSONRPCError(RPC_WALLET_NOT_SPECIFIED,
                        "Wallet file not specified (must request wallet RPC "
                        "through /wallet/<filename> uri-path).");
 }
 
 void EnsureWalletIsUnlocked(const CWallet *pwallet) {
     if (pwallet->IsLocked()) {
         throw JSONRPCError(RPC_WALLET_UNLOCK_NEEDED,
                            "Error: Please enter the wallet passphrase with "
                            "walletpassphrase first.");
     }
 }
 
 // also_create should only be set to true when the RPC is expected to add
 // things to a blank wallet and make it no longer blank
 LegacyScriptPubKeyMan &EnsureLegacyScriptPubKeyMan(CWallet &wallet,
                                                    bool also_create) {
     LegacyScriptPubKeyMan *spk_man = wallet.GetLegacyScriptPubKeyMan();
     if (!spk_man && also_create) {
         spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
     }
     if (!spk_man) {
         throw JSONRPCError(RPC_WALLET_ERROR,
                            "This type of wallet does not support this command");
     }
     return *spk_man;
 }
 
 static void WalletTxToJSON(interfaces::Chain &chain, const CWalletTx &wtx,
                            UniValue &entry) {
     int confirms = wtx.GetDepthInMainChain();
     entry.pushKV("confirmations", confirms);
     if (wtx.IsCoinBase()) {
         entry.pushKV("generated", true);
     }
     if (confirms > 0) {
         entry.pushKV("blockhash", wtx.m_confirm.hashBlock.GetHex());
         entry.pushKV("blockheight", wtx.m_confirm.block_height);
         entry.pushKV("blockindex", wtx.m_confirm.nIndex);
         int64_t block_time;
         CHECK_NONFATAL(chain.findBlock(wtx.m_confirm.hashBlock,
                                        FoundBlock().time(block_time)));
         entry.pushKV("blocktime", block_time);
     } else {
         entry.pushKV("trusted", wtx.IsTrusted());
     }
     uint256 hash = wtx.GetId();
     entry.pushKV("txid", hash.GetHex());
     UniValue conflicts(UniValue::VARR);
     for (const uint256 &conflict : wtx.GetConflicts()) {
         conflicts.push_back(conflict.GetHex());
     }
     entry.pushKV("walletconflicts", conflicts);
     entry.pushKV("time", wtx.GetTxTime());
     entry.pushKV("timereceived", (int64_t)wtx.nTimeReceived);
 
     for (const std::pair<const std::string, std::string> &item : wtx.mapValue) {
         entry.pushKV(item.first, item.second);
     }
 }
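 
 // Illustrative shape of the resulting entry for a confirmed, non-coinbase
 // transaction (all values are placeholders):
 //   {
 //     "confirmations": 12,
 //     "blockhash": "...", "blockheight": 650000, "blockindex": 3,
 //     "blocktime": 1600000000,
 //     "txid": "...", "walletconflicts": [],
 //     "time": 1600000000, "timereceived": 1600000000,
 //     ... one key/value pair per wtx.mapValue entry ...
 //   }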
 
 static std::string LabelFromValue(const UniValue &value) {
     std::string label = value.get_str();
     if (label == "*") {
         throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME, "Invalid label name");
     }
     return label;
 }
 
 static UniValue getnewaddress(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getnewaddress",
         "Returns a new Bitcoin address for receiving payments.\n"
         "If 'label' is specified, it is added to the address book \n"
         "so payments received with the address will be associated with "
         "'label'.\n",
         {
             {"label", RPCArg::Type::STR, /* default */ "null",
              "The label name for the address to be linked to. If not provided, "
              "the default label \"\" is used. It can also be set to the empty "
              "string \"\" to represent the default label. The label does not "
              "need to exist, it will be created if there is no label by the "
              "given name."},
             {"address_type", RPCArg::Type::STR,
              /* default */ "set by -addresstype",
              "The address type to use. Options are \"legacy\"."},
         },
         RPCResult{"\"address\"    (string) The new bitcoin address\n"},
         RPCExamples{HelpExampleCli("getnewaddress", "") +
                     HelpExampleRpc("getnewaddress", "")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     if (!pwallet->CanGetAddresses()) {
         throw JSONRPCError(RPC_WALLET_ERROR,
                            "Error: This wallet has no available keys");
     }
 
     // Parse the label first so we don't generate a key if there's an error
     std::string label;
     if (!request.params[0].isNull()) {
         label = LabelFromValue(request.params[0]);
     }
 
     OutputType output_type = pwallet->m_default_address_type;
     if (!request.params[1].isNull()) {
         if (!ParseOutputType(request.params[1].get_str(), output_type)) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                strprintf("Unknown address type '%s'",
                                          request.params[1].get_str()));
         }
     }
 
     CTxDestination dest;
     std::string error;
     if (!pwallet->GetNewDestination(output_type, label, dest, error)) {
         throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error);
     }
 
     return EncodeDestination(dest, config);
 }
 
 static UniValue getrawchangeaddress(const Config &config,
                                     const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getrawchangeaddress",
         "Returns a new Bitcoin address, for receiving change.\n"
         "This is for use with raw transactions, NOT normal use.\n",
         {},
         RPCResult{"\"address\"    (string) The address\n"},
         RPCExamples{HelpExampleCli("getrawchangeaddress", "") +
                     HelpExampleRpc("getrawchangeaddress", "")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     if (!pwallet->CanGetAddresses(true)) {
         throw JSONRPCError(RPC_WALLET_ERROR,
                            "Error: This wallet has no available keys");
     }
 
     OutputType output_type =
         pwallet->m_default_change_type != OutputType::CHANGE_AUTO
             ? pwallet->m_default_change_type
             : pwallet->m_default_address_type;
     if (!request.params[0].isNull()) {
         if (!ParseOutputType(request.params[0].get_str(), output_type)) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                strprintf("Unknown address type '%s'",
                                          request.params[0].get_str()));
         }
     }
 
     CTxDestination dest;
     std::string error;
     if (!pwallet->GetNewChangeDestination(output_type, dest, error)) {
         throw JSONRPCError(RPC_WALLET_KEYPOOL_RAN_OUT, error);
     }
     return EncodeDestination(dest, config);
 }
 
 static UniValue setlabel(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "setlabel",
         "Sets the label associated with the given address.\n",
         {
             {"address", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The bitcoin address to be associated with a label."},
             {"label", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The label to assign to the address."},
         },
         RPCResults{},
         RPCExamples{
             HelpExampleCli("setlabel",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"tabby\"") +
             HelpExampleRpc(
                 "setlabel",
                 "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"tabby\"")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     CTxDestination dest =
         DecodeDestination(request.params[0].get_str(), wallet->chainParams);
     if (!IsValidDestination(dest)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Invalid Bitcoin address");
     }
 
     std::string label = LabelFromValue(request.params[1]);
 
     if (pwallet->IsMine(dest)) {
         pwallet->SetAddressBook(dest, label, "receive");
     } else {
         pwallet->SetAddressBook(dest, label, "send");
     }
 
     return NullUniValue;
 }
 
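 /**
  * Create, commit and return a transaction paying nValue to a single
  * destination. Throws JSONRPCError if the amount is not positive, if it
  * exceeds the wallet's trusted balance, or if transaction creation fails.
  */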
 static CTransactionRef SendMoney(CWallet *const pwallet,
                                  const CTxDestination &address, Amount nValue,
                                  bool fSubtractFeeFromAmount,
                                  const CCoinControl &coin_control,
                                  mapValue_t mapValue) {
     Amount curBalance =
         pwallet->GetBalance(0, coin_control.m_avoid_address_reuse)
             .m_mine_trusted;
 
     // Check amount
     if (nValue <= Amount::zero()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid amount");
     }
 
     if (nValue > curBalance) {
         throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, "Insufficient funds");
     }
 
     // Parse Bitcoin address
     CScript scriptPubKey = GetScriptForDestination(address);
 
     // Create and send the transaction
     Amount nFeeRequired;
     bilingual_str error;
     std::vector<CRecipient> vecSend;
     int nChangePosRet = -1;
     CRecipient recipient = {scriptPubKey, nValue, fSubtractFeeFromAmount};
     vecSend.push_back(recipient);
 
     CTransactionRef tx;
     if (!pwallet->CreateTransaction(vecSend, tx, nFeeRequired, nChangePosRet,
                                     error, coin_control)) {
         if (!fSubtractFeeFromAmount && nValue + nFeeRequired > curBalance) {
             error = strprintf(Untranslated("Error: This transaction requires a "
                                            "transaction fee of at least %s"),
                               FormatMoney(nFeeRequired));
         }
         throw JSONRPCError(RPC_WALLET_ERROR, error.original);
     }
     pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */);
     return tx;
 }
 
 static UniValue sendtoaddress(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "sendtoaddress",
         "Send an amount to a given address.\n" +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"address", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The bitcoin address to send to."},
             {"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO,
              "The amount in " + CURRENCY_UNIT + " to send. eg 0.1"},
             {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "A comment used to store what the transaction is for.\n"
              "                             This is not part of the "
              "transaction, just kept in your wallet."},
             {"comment_to", RPCArg::Type::STR,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "A comment to store the name of the person or organization\n"
              "                             to which you're sending the "
              "transaction. This is not part of the \n"
              "                             transaction, just kept in "
              "your wallet."},
             {"subtractfeefromamount", RPCArg::Type::BOOL,
              /* default */ "false",
              "The fee will be deducted from the amount being sent.\n"
              "                             The recipient will receive "
              "fewer bitcoins than you enter in the amount field."},
             {"avoid_reuse", RPCArg::Type::BOOL,
              /* default */ "true",
              "(only available if avoid_reuse wallet flag is set) Avoid "
              "spending from dirty addresses; addresses are considered\n"
              "                             dirty if they have previously "
              "been used in a transaction."},
         },
         RPCResult{"\"txid\"                  (string) The transaction id.\n"},
         RPCExamples{
             HelpExampleCli("sendtoaddress",
                            "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 0.1") +
             HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvay"
                                             "dd\" 0.1 \"donation\" \"seans "
                                             "outpost\"") +
             HelpExampleCli("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44"
                                             "Jvaydd\" 0.1 \"\" \"\" true") +
             HelpExampleRpc("sendtoaddress", "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvay"
                                             "dd\", 0.1, \"donation\", \"seans "
                                             "outpost\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     CTxDestination dest =
         DecodeDestination(request.params[0].get_str(), wallet->chainParams);
     if (!IsValidDestination(dest)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
     }
 
     // Amount
     Amount nAmount = AmountFromValue(request.params[1]);
     if (nAmount <= Amount::zero()) {
         throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount for send");
     }
 
     // Wallet comments
     mapValue_t mapValue;
     if (!request.params[2].isNull() && !request.params[2].get_str().empty()) {
         mapValue["comment"] = request.params[2].get_str();
     }
     if (!request.params[3].isNull() && !request.params[3].get_str().empty()) {
         mapValue["to"] = request.params[3].get_str();
     }
 
     bool fSubtractFeeFromAmount = false;
     if (!request.params[4].isNull()) {
         fSubtractFeeFromAmount = request.params[4].get_bool();
     }
 
     CCoinControl coin_control;
     coin_control.m_avoid_address_reuse =
         GetAvoidReuseFlag(pwallet, request.params[5]);
     // We also enable partial spend avoidance if reuse avoidance is set.
     coin_control.m_avoid_partial_spends |= coin_control.m_avoid_address_reuse;
 
     EnsureWalletIsUnlocked(pwallet);
 
     CTransactionRef tx =
         SendMoney(pwallet, dest, nAmount, fSubtractFeeFromAmount, coin_control,
                   std::move(mapValue));
     return tx->GetId().GetHex();
 }
 
 static UniValue listaddressgroupings(const Config &config,
                                      const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listaddressgroupings",
         "Lists groups of addresses which have had their common ownership\n"
         "made public by common use as inputs or as the resulting change\n"
         "in past transactions\n",
         {},
         RPCResult{"[\n"
                   "  [\n"
                   "    [\n"
                   "      \"address\",            (string) The bitcoin address\n"
                   "      amount,                 (numeric) The amount in " +
                   CURRENCY_UNIT +
                   "\n"
                   "      \"label\"               (string, optional) The label\n"
                   "    ]\n"
                   "    ,...\n"
                   "  ]\n"
                   "  ,...\n"
                   "]\n"},
         RPCExamples{HelpExampleCli("listaddressgroupings", "") +
                     HelpExampleRpc("listaddressgroupings", "")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     UniValue jsonGroupings(UniValue::VARR);
     std::map<CTxDestination, Amount> balances = pwallet->GetAddressBalances();
     for (const std::set<CTxDestination> &grouping :
          pwallet->GetAddressGroupings()) {
         UniValue jsonGrouping(UniValue::VARR);
         for (const CTxDestination &address : grouping) {
             UniValue addressInfo(UniValue::VARR);
             addressInfo.push_back(EncodeDestination(address, config));
             addressInfo.push_back(ValueFromAmount(balances[address]));
 
             const auto *address_book_entry =
                 pwallet->FindAddressBookEntry(address);
             if (address_book_entry) {
                 addressInfo.push_back(address_book_entry->GetLabel());
             }
             jsonGrouping.push_back(addressInfo);
         }
         jsonGroupings.push_back(jsonGrouping);
     }
 
     return jsonGroupings;
 }
 
 static UniValue signmessage(const Config &config,
                             const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "signmessage",
         "Sign a message with the private key of an address" +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"address", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The bitcoin address to use for the private key."},
             {"message", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The message to create a signature of."},
         },
         RPCResult{"\"signature\"          (string) The signature of the "
                   "message encoded in base 64\n"},
         RPCExamples{
             "\nUnlock the wallet for 30 seconds\n" +
             HelpExampleCli("walletpassphrase", "\"mypassphrase\" 30") +
             "\nCreate the signature\n" +
             HelpExampleCli(
                 "signmessage",
                 "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"my message\"") +
             "\nVerify the signature\n" +
             HelpExampleCli("verifymessage",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" "
                            "\"signature\" \"my message\"") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc(
                 "signmessage",
                 "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"my message\"")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     EnsureWalletIsUnlocked(pwallet);
 
     std::string strAddress = request.params[0].get_str();
     std::string strMessage = request.params[1].get_str();
 
     CTxDestination dest = DecodeDestination(strAddress, wallet->chainParams);
     if (!IsValidDestination(dest)) {
         throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address");
     }
 
     const PKHash *pkhash = boost::get<PKHash>(&dest);
     if (!pkhash) {
         throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key");
     }
 
     CScript script_pub_key = GetScriptForDestination(*pkhash);
     std::unique_ptr<SigningProvider> provider =
         pwallet->GetSigningProvider(script_pub_key);
     if (!provider) {
         throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
     }
 
     CKey key;
     CKeyID keyID(*pkhash);
     if (!provider->GetKey(keyID, key)) {
         throw JSONRPCError(RPC_WALLET_ERROR, "Private key not available");
     }
 
     CHashWriter ss(SER_GETHASH, 0);
     ss << strMessageMagic;
     ss << strMessage;
 
     std::vector<uint8_t> vchSig;
     if (!key.SignCompact(ss.GetHash(), vchSig)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed");
     }
 
     return EncodeBase64(vchSig.data(), vchSig.size());
 }
 
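 /**
  * Total amount received either by a single address (params[0]) or by every
  * address assigned to a label (when by_label is true), counting only
  * non-coinbase wallet transactions with at least params[1] confirmations
  * (default 1).
  */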
 static Amount GetReceived(const CWallet &wallet, const UniValue &params,
                           bool by_label)
     EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet) {
     std::set<CTxDestination> address_set;
 
     if (by_label) {
         // Get the set of addresses assigned to label
         std::string label = LabelFromValue(params[0]);
         address_set = wallet.GetLabelAddresses(label);
     } else {
         // Get the address
         CTxDestination dest =
             DecodeDestination(params[0].get_str(), wallet.chainParams);
         if (!IsValidDestination(dest)) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Invalid Bitcoin address");
         }
         CScript script_pub_key = GetScriptForDestination(dest);
         if (!wallet.IsMine(script_pub_key)) {
             throw JSONRPCError(RPC_WALLET_ERROR, "Address not found in wallet");
         }
         address_set.insert(dest);
     }
 
     // Minimum confirmations
     int min_depth = 1;
     if (!params[1].isNull()) {
         min_depth = params[1].get_int();
     }
 
     // Tally
     Amount amount = Amount::zero();
     for (const std::pair<const TxId, CWalletTx> &wtx_pair : wallet.mapWallet) {
         const CWalletTx &wtx = wtx_pair.second;
         TxValidationState txState;
         if (wtx.IsCoinBase() ||
             !wallet.chain().contextualCheckTransactionForCurrentBlock(
                 *wtx.tx, txState) ||
             wtx.GetDepthInMainChain() < min_depth) {
             continue;
         }
 
         for (const CTxOut &txout : wtx.tx->vout) {
             CTxDestination address;
             if (ExtractDestination(txout.scriptPubKey, address) &&
                 wallet.IsMine(address) && address_set.count(address)) {
                 amount += txout.nValue;
             }
         }
     }
 
     return amount;
 }
 
 static UniValue getreceivedbyaddress(const Config &config,
                                      const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getreceivedbyaddress",
         "Returns the total amount received by the given address in "
         "transactions with at least minconf confirmations.\n",
         {
             {"address", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The bitcoin address for transactions."},
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "Only include transactions confirmed at least this many times."},
         },
         RPCResult{"amount   (numeric) The total amount in " + CURRENCY_UNIT +
                   " received at this address.\n"},
         RPCExamples{
             "\nThe amount from transactions with at least 1 confirmation\n" +
             HelpExampleCli("getreceivedbyaddress",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\"") +
             "\nThe amount including unconfirmed transactions, zero "
             "confirmations\n" +
             HelpExampleCli("getreceivedbyaddress",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" 0") +
             "\nThe amount with at least 6 confirmations\n" +
             HelpExampleCli("getreceivedbyaddress",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" 6") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("getreceivedbyaddress",
                            "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", 6")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     return ValueFromAmount(GetReceived(*pwallet, request.params,
                                        /* by_label */ false));
 }
 
 static UniValue getreceivedbylabel(const Config &config,
                                    const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getreceivedbylabel",
         "Returns the total amount received by addresses with <label> in "
         "transactions with at least [minconf] confirmations.\n",
         {
             {"label", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The selected label, may be the default label using \"\"."},
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "Only include transactions confirmed at least this many times."},
         },
         RPCResult{"amount              (numeric) The total amount in " +
                   CURRENCY_UNIT + " received for this label.\n"},
         RPCExamples{"\nAmount received by the default label with at least 1 "
                     "confirmation\n" +
                     HelpExampleCli("getreceivedbylabel", "\"\"") +
                     "\nAmount received at the tabby label including "
                     "unconfirmed amounts with zero confirmations\n" +
                     HelpExampleCli("getreceivedbylabel", "\"tabby\" 0") +
                     "\nThe amount with at least 6 confirmations\n" +
                     HelpExampleCli("getreceivedbylabel", "\"tabby\" 6") +
                     "\nAs a JSON-RPC call\n" +
                     HelpExampleRpc("getreceivedbylabel", "\"tabby\", 6")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     return ValueFromAmount(GetReceived(*pwallet, request.params,
                                        /* by_label */ true));
 }
 
 static UniValue getbalance(const Config &config,
                            const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getbalance",
         "Returns the total available balance.\n"
         "The available balance is what the wallet considers currently "
         "spendable, and is\n"
         "thus affected by options which limit spendability such as "
         "-spendzeroconfchange.\n",
         {
             {"dummy", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "Remains for backward compatibility. Must be excluded or set to "
              "\"*\"."},
             {"minconf", RPCArg::Type::NUM, /* default */ "0",
              "Only include transactions confirmed at least this many times."},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Also include balance in watch-only addresses (see "
              "'importaddress')"},
             {"avoid_reuse", RPCArg::Type::BOOL,
              /* default */ "true",
              "(only available if avoid_reuse wallet flag is set) Do not "
              "include balance in dirty outputs; addresses are considered dirty "
              "if they have previously been used in a transaction."},
         },
         RPCResult{"amount              (numeric) The total amount in " +
                   CURRENCY_UNIT + " received for this wallet.\n"},
         RPCExamples{
             "\nThe total amount in the wallet with 1 or more confirmations\n" +
             HelpExampleCli("getbalance", "") +
             "\nThe total amount in the wallet at least 6 blocks confirmed\n" +
             HelpExampleCli("getbalance", "\"*\" 6") + "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("getbalance", "\"*\", 6")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     const UniValue &dummy_value = request.params[0];
     if (!dummy_value.isNull() && dummy_value.get_str() != "*") {
         throw JSONRPCError(
             RPC_METHOD_DEPRECATED,
             "dummy first argument must be excluded or set to \"*\".");
     }
 
     int min_depth = 0;
     if (!request.params[1].isNull()) {
         min_depth = request.params[1].get_int();
     }
 
     bool include_watchonly = ParseIncludeWatchonly(request.params[2], *pwallet);
 
     bool avoid_reuse = GetAvoidReuseFlag(pwallet, request.params[3]);
 
     const auto bal = pwallet->GetBalance(min_depth, avoid_reuse);
 
     return ValueFromAmount(bal.m_mine_trusted + (include_watchonly
                                                      ? bal.m_watchonly_trusted
                                                      : Amount::zero()));
 }
 
 static UniValue getunconfirmedbalance(const Config &config,
                                       const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getunconfirmedbalance",
         "DEPRECATED\nIdentical to getbalances().mine.untrusted_pending\n",
         {},
         RPCResults{},
         RPCExamples{""},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     return ValueFromAmount(pwallet->GetBalance().m_mine_untrusted_pending);
 }
 
 static UniValue sendmany(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "sendmany",
         "Send multiple times. Amounts are double-precision "
         "floating point numbers." +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"dummy", RPCArg::Type::STR, RPCArg::Optional::NO,
              "Must be set to \"\" for backwards compatibility.", "\"\""},
             {
                 "amounts",
                 RPCArg::Type::OBJ,
                 RPCArg::Optional::NO,
                 "A json object with addresses and amounts",
                 {
                     {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO,
                      "The bitcoin address is the key, the numeric amount (can "
                      "be string) in " +
                          CURRENCY_UNIT + " is the value"},
                 },
             },
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "Only use the balance confirmed at least this many times."},
             {"comment", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "A comment"},
             {
                 "subtractfeefrom",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::OMITTED_NAMED_ARG,
                 "A json array with addresses.\n"
                 "                           The fee will be equally deducted "
                 "from the amount of each selected address.\n"
                 "                           Those recipients will receive fewer "
                 "bitcoins than you enter in their corresponding amount field.\n"
                 "                           If no addresses are specified "
                 "here, the sender pays the fee.",
                 {
                     {"address", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                      "Subtract fee from this address"},
                 },
             },
         },
         RPCResult{
             "\"txid\"                   (string) The transaction id for the "
             "send. Only 1 transaction is created regardless of \n"
             "                                    the number of addresses.\n"},
         RPCExamples{
             "\nSend two amounts to two different addresses:\n" +
             HelpExampleCli(
                 "sendmany",
                 "\"\" "
                 "\"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,"
                 "\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\"") +
             "\nSend two amounts to two different addresses setting the "
             "confirmation and comment:\n" +
             HelpExampleCli("sendmany",
                            "\"\" "
                            "\"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,"
                            "\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\" "
                            "6 \"testing\"") +
             "\nSend two amounts to two different addresses, subtract fee "
             "from amount:\n" +
             HelpExampleCli(
                 "sendmany",
                 "\"\" "
                 "\"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,"
                 "\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\" 1 \"\" "
                 "\"[\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\","
                 "\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\"]\"") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("sendmany",
                            "\"\", "
                            "\"{\\\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\\\":0.01,"
                            "\\\"1353tsE8YMTA4EuV7dgUXGjNFf9KpVvKHz\\\":0.02}\","
                            " 6, \"testing\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     if (!request.params[0].isNull() && !request.params[0].get_str().empty()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "Dummy value must be set to \"\"");
     }
     UniValue sendTo = request.params[1].get_obj();
 
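     // Note: the documented minconf parameter (request.params[2]) is not read
     // below; coin selection uses a default-constructed CCoinControl.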
     mapValue_t mapValue;
     if (!request.params[3].isNull() && !request.params[3].get_str().empty()) {
         mapValue["comment"] = request.params[3].get_str();
     }
 
     UniValue subtractFeeFromAmount(UniValue::VARR);
     if (!request.params[4].isNull()) {
         subtractFeeFromAmount = request.params[4].get_array();
     }
 
     std::set<CTxDestination> destinations;
     std::vector<CRecipient> vecSend;
 
     std::vector<std::string> keys = sendTo.getKeys();
     for (const std::string &name_ : keys) {
         CTxDestination dest = DecodeDestination(name_, wallet->chainParams);
         if (!IsValidDestination(dest)) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                std::string("Invalid Bitcoin address: ") +
                                    name_);
         }
 
         if (destinations.count(dest)) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 std::string("Invalid parameter, duplicated address: ") + name_);
         }
         destinations.insert(dest);
 
         CScript scriptPubKey = GetScriptForDestination(dest);
         Amount nAmount = AmountFromValue(sendTo[name_]);
         if (nAmount <= Amount::zero()) {
             throw JSONRPCError(RPC_TYPE_ERROR, "Invalid amount for send");
         }
 
         bool fSubtractFeeFromAmount = false;
         for (size_t idx = 0; idx < subtractFeeFromAmount.size(); idx++) {
             const UniValue &addr = subtractFeeFromAmount[idx];
             if (addr.get_str() == name_) {
                 fSubtractFeeFromAmount = true;
             }
         }
 
         CRecipient recipient = {scriptPubKey, nAmount, fSubtractFeeFromAmount};
         vecSend.push_back(recipient);
     }
 
     EnsureWalletIsUnlocked(pwallet);
 
     // Shuffle recipient list
     std::shuffle(vecSend.begin(), vecSend.end(), FastRandomContext());
 
     // Send
     Amount nFeeRequired = Amount::zero();
     int nChangePosRet = -1;
     bilingual_str error;
     CTransactionRef tx;
     CCoinControl coinControl;
     bool fCreated = pwallet->CreateTransaction(
         vecSend, tx, nFeeRequired, nChangePosRet, error, coinControl);
     if (!fCreated) {
         throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original);
     }
     pwallet->CommitTransaction(tx, std::move(mapValue), {} /* orderForm */);
     return tx->GetId().GetHex();
 }
 
 static UniValue addmultisigaddress(const Config &config,
                                    const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "addmultisigaddress",
         "Add an nrequired-to-sign multisignature address to the wallet. "
         "Requires a new wallet backup.\n"
         "Each key is a Bitcoin address or hex-encoded public key.\n"
         "If 'label' is specified (DEPRECATED), assign address to that label.\n",
         {
             {"nrequired", RPCArg::Type::NUM, RPCArg::Optional::NO,
              "The number of required signatures out of the n keys or "
              "addresses."},
             {
                 "keys",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of bitcoin addresses or hex-encoded public keys",
                 {
                     {"key", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                      "bitcoin address or hex-encoded public key"},
                 },
             },
             {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "A label to assign the addresses to."},
         },
         RPCResult{"{\n"
                   "  \"address\":\"multisigaddress\",    (string) The value of "
                   "the new multisig address.\n"
                   "  \"redeemScript\":\"script\"         (string) The string "
                   "value of the hex-encoded redemption script.\n"
                   "}\n"},
         RPCExamples{
             "\nAdd a multisig address from 2 addresses\n" +
             HelpExampleCli("addmultisigaddress",
                            "2 "
                            "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\","
                            "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("addmultisigaddress",
                            "2, "
                            "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\","
                            "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"")},
     }
         .Check(request);
 
     LegacyScriptPubKeyMan &spk_man = EnsureLegacyScriptPubKeyMan(*pwallet);
 
     LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
 
     std::string label;
     if (!request.params[2].isNull()) {
         label = LabelFromValue(request.params[2]);
     }
 
     int required = request.params[0].get_int();
 
     // Get the public keys
     const UniValue &keys_or_addrs = request.params[1].get_array();
     std::vector<CPubKey> pubkeys;
     for (size_t i = 0; i < keys_or_addrs.size(); ++i) {
         if (IsHex(keys_or_addrs[i].get_str()) &&
             (keys_or_addrs[i].get_str().length() == 66 ||
              keys_or_addrs[i].get_str().length() == 130)) {
             pubkeys.push_back(HexToPubKey(keys_or_addrs[i].get_str()));
         } else {
             pubkeys.push_back(AddrToPubKey(wallet->chainParams, spk_man,
                                            keys_or_addrs[i].get_str()));
         }
     }
 
     OutputType output_type = pwallet->m_default_address_type;
 
     // Construct using pay-to-script-hash:
     CScript inner;
     CTxDestination dest = AddAndGetMultisigDestination(
         required, pubkeys, output_type, spk_man, inner);
     pwallet->SetAddressBook(dest, label, "send");
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("address", EncodeDestination(dest, config));
     result.pushKV("redeemScript", HexStr(inner.begin(), inner.end()));
     return result;
 }
 
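 /**
  * Running totals for a single destination or label, accumulated by
  * ListReceived().
  */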
 struct tallyitem {
     Amount nAmount{Amount::zero()};
     int nConf{std::numeric_limits<int>::max()};
     std::vector<uint256> txids;
     bool fIsWatchonly{false};
     tallyitem() {}
 };
 
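 /**
  * Shared implementation for listreceivedbyaddress and listreceivedbylabel.
  * Tallies wallet outputs per destination, honoring minconf, include_empty,
  * include_watchonly and (when not listing by label) an optional address
  * filter, and returns a JSON array, aggregated per label when by_label is
  * true.
  */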
 static UniValue ListReceived(const Config &config, CWallet *const pwallet,
                              const UniValue &params, bool by_label)
     EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet) {
     // Minimum confirmations
     int nMinDepth = 1;
     if (!params[0].isNull()) {
         nMinDepth = params[0].get_int();
     }
 
     // Whether to include empty labels
     bool fIncludeEmpty = false;
     if (!params[1].isNull()) {
         fIncludeEmpty = params[1].get_bool();
     }
 
     isminefilter filter = ISMINE_SPENDABLE;
     if (ParseIncludeWatchonly(params[2], *pwallet)) {
         filter |= ISMINE_WATCH_ONLY;
     }
 
     bool has_filtered_address = false;
     CTxDestination filtered_address = CNoDestination();
     if (!by_label && params.size() > 3) {
         if (!IsValidDestinationString(params[3].get_str(),
                                       pwallet->chainParams)) {
             throw JSONRPCError(RPC_WALLET_ERROR,
                                "address_filter parameter was invalid");
         }
         filtered_address =
             DecodeDestination(params[3].get_str(), pwallet->chainParams);
         has_filtered_address = true;
     }
 
     // Tally
     std::map<CTxDestination, tallyitem> mapTally;
     for (const std::pair<const TxId, CWalletTx> &pairWtx : pwallet->mapWallet) {
         const CWalletTx &wtx = pairWtx.second;
 
         TxValidationState state;
         if (wtx.IsCoinBase() ||
             !pwallet->chain().contextualCheckTransactionForCurrentBlock(
                 *wtx.tx, state)) {
             continue;
         }
 
         int nDepth = wtx.GetDepthInMainChain();
         if (nDepth < nMinDepth) {
             continue;
         }
 
         for (const CTxOut &txout : wtx.tx->vout) {
             CTxDestination address;
             if (!ExtractDestination(txout.scriptPubKey, address)) {
                 continue;
             }
 
             if (has_filtered_address && !(filtered_address == address)) {
                 continue;
             }
 
             isminefilter mine = pwallet->IsMine(address);
             if (!(mine & filter)) {
                 continue;
             }
 
             tallyitem &item = mapTally[address];
             item.nAmount += txout.nValue;
             item.nConf = std::min(item.nConf, nDepth);
             item.txids.push_back(wtx.GetId());
             if (mine & ISMINE_WATCH_ONLY) {
                 item.fIsWatchonly = true;
             }
         }
     }
 
     // Reply
     UniValue ret(UniValue::VARR);
     std::map<std::string, tallyitem> label_tally;
 
     // Create m_address_book iterator
     // If we aren't filtering, go from begin() to end()
     auto start = pwallet->m_address_book.begin();
     auto end = pwallet->m_address_book.end();
     // If we are filtering, find() the applicable entry
     if (has_filtered_address) {
         start = pwallet->m_address_book.find(filtered_address);
         if (start != end) {
             end = std::next(start);
         }
     }
 
     for (auto item_it = start; item_it != end; ++item_it) {
         if (item_it->second.IsChange()) {
             continue;
         }
         const CTxDestination &address = item_it->first;
         const std::string &label = item_it->second.GetLabel();
         std::map<CTxDestination, tallyitem>::iterator it =
             mapTally.find(address);
         if (it == mapTally.end() && !fIncludeEmpty) {
             continue;
         }
 
         Amount nAmount = Amount::zero();
         int nConf = std::numeric_limits<int>::max();
         bool fIsWatchonly = false;
         if (it != mapTally.end()) {
             nAmount = (*it).second.nAmount;
             nConf = (*it).second.nConf;
             fIsWatchonly = (*it).second.fIsWatchonly;
         }
 
         if (by_label) {
             tallyitem &_item = label_tally[label];
             _item.nAmount += nAmount;
             _item.nConf = std::min(_item.nConf, nConf);
             _item.fIsWatchonly = fIsWatchonly;
         } else {
             UniValue obj(UniValue::VOBJ);
             if (fIsWatchonly) {
                 obj.pushKV("involvesWatchonly", true);
             }
             obj.pushKV("address", EncodeDestination(address, config));
             obj.pushKV("amount", ValueFromAmount(nAmount));
             obj.pushKV("confirmations",
                        (nConf == std::numeric_limits<int>::max() ? 0 : nConf));
             obj.pushKV("label", label);
             UniValue transactions(UniValue::VARR);
             if (it != mapTally.end()) {
                 for (const uint256 &_item : (*it).second.txids) {
                     transactions.push_back(_item.GetHex());
                 }
             }
             obj.pushKV("txids", transactions);
             ret.push_back(obj);
         }
     }
 
     if (by_label) {
         for (const auto &entry : label_tally) {
             Amount nAmount = entry.second.nAmount;
             int nConf = entry.second.nConf;
             UniValue obj(UniValue::VOBJ);
             if (entry.second.fIsWatchonly) {
                 obj.pushKV("involvesWatchonly", true);
             }
             obj.pushKV("amount", ValueFromAmount(nAmount));
             obj.pushKV("confirmations",
                        (nConf == std::numeric_limits<int>::max() ? 0 : nConf));
             obj.pushKV("label", entry.first);
             ret.push_back(obj);
         }
     }
 
     return ret;
 }
 
 static UniValue listreceivedbyaddress(const Config &config,
                                       const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listreceivedbyaddress",
         "List balances by receiving address.\n",
         {
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "The minimum number of confirmations before payments are "
              "included."},
             {"include_empty", RPCArg::Type::BOOL, /* default */ "false",
              "Whether to include addresses that haven't received any "
              "payments."},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Whether to include watch-only addresses (see 'importaddress')."},
             {"address_filter", RPCArg::Type::STR,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "If present, only return information on this address."},
         },
         RPCResult{
             "[\n"
             "  {\n"
             "    \"involvesWatchonly\" : true,        (bool) Only returns true "
             "if imported addresses were involved in transaction\n"
             "    \"address\" : \"receivingaddress\",  (string) The receiving "
             "address\n"
             "    \"amount\" : x.xxx,                  (numeric) The total "
             "amount in " +
             CURRENCY_UNIT +
             " received by the address\n"
             "    \"confirmations\" : n,               (numeric) The number of "
             "confirmations of the most recent transaction included\n"
             "    \"label\" : \"label\",               (string) The label of "
             "the receiving address. The default label is \"\".\n"
             "    \"txids\": [\n"
             "       \"txid\",                         (string) The ids of "
             "transactions received with the address \n"
             "       ...\n"
             "    ]\n"
             "  }\n"
             "  ,...\n"
             "]\n"},
         RPCExamples{
             HelpExampleCli("listreceivedbyaddress", "") +
             HelpExampleCli("listreceivedbyaddress", "6 true") +
             HelpExampleRpc("listreceivedbyaddress", "6, true, true") +
             HelpExampleRpc(
                 "listreceivedbyaddress",
                 "6, true, true, \"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     return ListReceived(config, pwallet, request.params, false);
 }
 
 static UniValue listreceivedbylabel(const Config &config,
                                     const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listreceivedbylabel",
         "List received transactions by label.\n",
         {
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "The minimum number of confirmations before payments are "
              "included."},
             {"include_empty", RPCArg::Type::BOOL, /* default */ "false",
              "Whether to include labels that haven't received any payments."},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Whether to include watch-only addresses (see 'importaddress')."},
         },
         RPCResult{"[\n"
                   "  {\n"
                   "    \"involvesWatchonly\" : true,   (bool) Only returns "
                   "true if imported addresses were involved in transaction\n"
                   "    \"amount\" : x.xxx,             (numeric) The total "
                   "amount received by addresses with this label\n"
                   "    \"confirmations\" : n,          (numeric) The number of "
                   "confirmations of the most recent transaction included\n"
                   "    \"label\" : \"label\"           (string) The label of "
                   "the receiving address. The default label is \"\".\n"
                   "  }\n"
                   "  ,...\n"
                   "]\n"},
         RPCExamples{HelpExampleCli("listreceivedbylabel", "") +
                     HelpExampleCli("listreceivedbylabel", "6 true") +
                     HelpExampleRpc("listreceivedbylabel", "6, true, true")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     return ListReceived(config, pwallet, request.params, true);
 }
 
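 /** Add an "address" key to the entry if the destination is valid. */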
 static void MaybePushAddress(UniValue &entry, const CTxDestination &dest) {
     if (IsValidDestination(dest)) {
         entry.pushKV("address", EncodeDestination(dest, GetConfig()));
     }
 }
 
 /**
  * List transactions based on the given criteria.
  *
  * @param  pwallet        The wallet.
  * @param  wtx            The wallet transaction.
  * @param  nMinDepth      The minimum confirmation depth.
  * @param  fLong          Whether to include the JSON version of the
  * transaction.
  * @param  ret            The UniValue into which the result is stored.
  * @param  filter_ismine  The "is mine" filter flags.
  * @param  filter_label   Optional label string to filter incoming transactions.
  */
 static void ListTransactions(CWallet *const pwallet, const CWalletTx &wtx,
                              int nMinDepth, bool fLong, UniValue &ret,
                              const isminefilter &filter_ismine,
                              const std::string *filter_label)
     EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet) {
     Amount nFee;
     std::list<COutputEntry> listReceived;
     std::list<COutputEntry> listSent;
 
     wtx.GetAmounts(listReceived, listSent, nFee, filter_ismine);
 
     bool involvesWatchonly = wtx.IsFromMe(ISMINE_WATCH_ONLY);
 
     // Sent
     if (!filter_label) {
         for (const COutputEntry &s : listSent) {
             UniValue entry(UniValue::VOBJ);
             if (involvesWatchonly ||
                 (pwallet->IsMine(s.destination) & ISMINE_WATCH_ONLY)) {
                 entry.pushKV("involvesWatchonly", true);
             }
             MaybePushAddress(entry, s.destination);
             entry.pushKV("category", "send");
             entry.pushKV("amount", ValueFromAmount(-s.amount));
             const auto *address_book_entry =
                 pwallet->FindAddressBookEntry(s.destination);
             if (address_book_entry) {
                 entry.pushKV("label", address_book_entry->GetLabel());
             }
             entry.pushKV("vout", s.vout);
             entry.pushKV("fee", ValueFromAmount(-1 * nFee));
             if (fLong) {
                 WalletTxToJSON(pwallet->chain(), wtx, entry);
             }
             entry.pushKV("abandoned", wtx.isAbandoned());
             ret.push_back(entry);
         }
     }
 
     // Received
     if (listReceived.size() > 0 && wtx.GetDepthInMainChain() >= nMinDepth) {
         for (const COutputEntry &r : listReceived) {
             std::string label;
             const auto *address_book_entry =
                 pwallet->FindAddressBookEntry(r.destination);
             if (address_book_entry) {
                 label = address_book_entry->GetLabel();
             }
             if (filter_label && label != *filter_label) {
                 continue;
             }
             UniValue entry(UniValue::VOBJ);
             if (involvesWatchonly ||
                 (pwallet->IsMine(r.destination) & ISMINE_WATCH_ONLY)) {
                 entry.pushKV("involvesWatchonly", true);
             }
             MaybePushAddress(entry, r.destination);
             if (wtx.IsCoinBase()) {
                 if (wtx.GetDepthInMainChain() < 1) {
                     entry.pushKV("category", "orphan");
                 } else if (wtx.IsImmatureCoinBase()) {
                     entry.pushKV("category", "immature");
                 } else {
                     entry.pushKV("category", "generate");
                 }
             } else {
                 entry.pushKV("category", "receive");
             }
             entry.pushKV("amount", ValueFromAmount(r.amount));
             if (address_book_entry) {
                 entry.pushKV("label", label);
             }
             entry.pushKV("vout", r.vout);
             if (fLong) {
                 WalletTxToJSON(pwallet->chain(), wtx, entry);
             }
             ret.push_back(entry);
         }
     }
 }
 
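 /**
  * Help text fragment describing the per-transaction fields emitted by
  * WalletTxToJSON(); appended to the RPCResult of the transaction listing
  * RPCs (e.g. listtransactions below).
  */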
 static const std::string TransactionDescriptionString() {
     return "    \"confirmations\": n,                        (numeric) The "
            "number of confirmations for the transaction. Negative "
            "confirmations means the\n"
            "                                                       transaction "
            "conflicted that many blocks ago.\n"
            "    \"generated\": xxx,                          (bool) Only "
            "present if transaction only input is a coinbase one.\n"
            "    \"trusted\": xxx,                            (bool) Only "
            "present if we consider transaction to be trusted and so safe to "
            "spend from.\n"
            "    \"blockhash\": \"hashvalue\",                  (string) The "
            "block hash containing the transaction.\n"
            "    \"blockheight\": n,                          (numeric) The "
            "block height containing the transaction.\n"
            "    \"blockindex\": n,                           (numeric) The "
            "index of the transaction in the block that includes it.\n"
            "    \"blocktime\": xxx,                          (numeric) The "
            "block time expressed in " +
            UNIX_EPOCH_TIME +
            ".\n"
            "    \"txid\": \"transactionid\",                   (string) The "
            "transaction id.\n"
            "    \"walletconflicts\": [                       (array) "
            "Conflicting transaction ids.\n"
            "      \"txid\",                                  (string) The "
            "transaction id.\n"
            "      ...\n"
            "    ],\n"
            "    \"time\": xxx,                               (numeric) The "
            "transaction time expressed in " +
            UNIX_EPOCH_TIME +
            ".\n"
            "    \"timereceived\": xxx,                       (numeric) The "
            "time received expressed in " +
            UNIX_EPOCH_TIME +
            ".\n"
            "    \"comment\": \"...\",                          (string) If a "
            "comment is associated with the transaction, only present if not "
            "empty.\n";
 }
 
 UniValue listtransactions(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listtransactions",
         "If a label name is provided, this will return only incoming "
         "transactions paying to addresses with the specified label.\n"
         "\nReturns up to 'count' most recent transactions skipping the first "
         "'from' transactions.\n",
         {
             {"label", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "If set, should be a valid label name to return only incoming "
              "transactions with the specified label, or \"*\" to disable "
              "filtering and return all transactions."},
             {"count", RPCArg::Type::NUM, /* default */ "10",
              "The number of transactions to return"},
             {"skip", RPCArg::Type::NUM, /* default */ "0",
              "The number of transactions to skip"},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Include transactions to watch-only addresses (see "
              "'importaddress')"},
         },
         RPCResult{
             "[\n"
             "  {\n"
             "    \"involvesWatchonly\": xxx, (bool) Only returns true if "
             "imported addresses were involved in transaction.\n"
             "    \"address\":\"address\",      (string) The bitcoin address of "
             "the transaction.\n"
             "    \"category\":               (string) The transaction "
             "category.\n"
             "                \"send\"                  Transactions sent.\n"
             "                \"receive\"               Non-coinbase "
             "transactions received.\n"
             "                \"generate\"              Coinbase transactions "
             "received with more than 100 confirmations.\n"
             "                \"immature\"              Coinbase transactions "
             "received with 100 or fewer confirmations.\n"
             "                \"orphan\"                Orphaned coinbase "
             "transactions received.\n"
             "    \"amount\": x.xxx,          (numeric) The amount in " +
             CURRENCY_UNIT +
             ". This is negative for the 'send' category, and is positive\n"
             "                                        for all other categories\n"
             "    \"label\": \"label\",         (string) A comment for the "
             "address/transaction, if any\n"
             "    \"vout\": n,                (numeric) the vout value\n"
             "    \"fee\": x.xxx,             (numeric) The amount of the fee "
             "in " +
             CURRENCY_UNIT +
             ". This is negative and only available for the \n"
             "                                         'send' category of "
             "transactions.\n" +
             TransactionDescriptionString() +
             "    \"abandoned\": xxx          (bool) 'true' if the transaction "
             "has been abandoned (inputs are respendable). Only available for "
             "the \n"
             "                                         'send' category of "
             "transactions.\n"
             "  }\n"
             "]\n"},
         RPCExamples{"\nList the most recent 10 transactions in the system\n" +
                     HelpExampleCli("listtransactions", "") +
                     "\nList transactions 100 to 120\n" +
                     HelpExampleCli("listtransactions", "\"*\" 20 100") +
                     "\nAs a JSON-RPC call\n" +
                     HelpExampleRpc("listtransactions", "\"*\", 20, 100")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     const std::string *filter_label = nullptr;
     if (!request.params[0].isNull() && request.params[0].get_str() != "*") {
         filter_label = &request.params[0].get_str();
         if (filter_label->empty()) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 "Label argument must be a valid label name or \"*\".");
         }
     }
     int nCount = 10;
     if (!request.params[1].isNull()) {
         nCount = request.params[1].get_int();
     }
 
     int nFrom = 0;
     if (!request.params[2].isNull()) {
         nFrom = request.params[2].get_int();
     }
 
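     // Restrict results to spendable outputs unless include_watchonly is
     // set; per the help text above it defaults to true for watch-only
     // wallets and to false otherwise.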
     isminefilter filter = ISMINE_SPENDABLE;
     if (ParseIncludeWatchonly(request.params[3], *pwallet)) {
         filter |= ISMINE_WATCH_ONLY;
     }
 
     if (nCount < 0) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative count");
     }
     if (nFrom < 0) {
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative from");
     }
     UniValue ret(UniValue::VARR);
 
     {
         LOCK(pwallet->cs_wallet);
 
         const CWallet::TxItems &txOrdered = pwallet->wtxOrdered;
 
         // Iterate backwards (newest first) until nCount + nFrom entries
         // have been collected; the nFrom newest entries are skipped below:
         for (CWallet::TxItems::const_reverse_iterator it = txOrdered.rbegin();
              it != txOrdered.rend(); ++it) {
             CWalletTx *const pwtx = (*it).second;
             ListTransactions(pwallet, *pwtx, 0, true, ret, filter,
                              filter_label);
             if (int(ret.size()) >= (nCount + nFrom)) {
                 break;
             }
         }
     }
 
     // ret is newest to oldest
 
     if (nFrom > (int)ret.size()) {
         nFrom = ret.size();
     }
     if ((nFrom + nCount) > (int)ret.size()) {
         nCount = ret.size() - nFrom;
     }
 
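     // E.g. with skip=5 and count=10, the reverse-iterator slice below drops
     // the 5 most recent entries of the newest-to-oldest 'ret' array and
     // emits the next 10, reordered oldest to newest.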
     const std::vector<UniValue> &txs = ret.getValues();
     UniValue result{UniValue::VARR};
     // Return oldest to newest
     result.push_backV({txs.rend() - nFrom - nCount, txs.rend() - nFrom});
     return result;
 }
 
 static UniValue listsinceblock(const Config &config,
                                const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listsinceblock",
         "Get all transactions in blocks since block [blockhash], or all "
         "transactions if omitted.\n"
         "If \"blockhash\" is no longer a part of the main chain, transactions "
         "from the fork point onward are included.\n"
         "Additionally, if include_removed is set, transactions affecting the "
         "wallet which were removed are returned in the \"removed\" array.\n",
         {
             {"blockhash", RPCArg::Type::STR,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "If set, the block hash to list transactions since, otherwise "
              "list all transactions."},
             {"target_confirmations", RPCArg::Type::NUM, /* default */ "1",
              "Return the nth block hash from the main chain. e.g. 1 would mean "
              "the best block hash. Note: this is not used as a filter, but "
              "only affects [lastblock] in the return value"},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Include transactions to watch-only addresses (see "
              "'importaddress')"},
             {"include_removed", RPCArg::Type::BOOL, /* default */ "true",
              "Show transactions that were removed due to a reorg in the "
              "\"removed\" array\n"
              "                                                           (not "
              "guaranteed to work on pruned nodes)"},
         },
         RPCResult{
             "{\n"
             "  \"transactions\": [\n"
             "    \"involvesWatchonly\": xxx, (bool) Only returns true if "
             "imported addresses were involved in transaction.\n"
             "    \"address\":\"address\",    (string) The bitcoin address of "
             "the transaction.\n"
             "    \"category\":               (string) The transaction "
             "category.\n"
             "                \"send\"                  Transactions sent.\n"
             "                \"receive\"               Non-coinbase "
             "transactions received.\n"
             "                \"generate\"              Coinbase transactions "
             "received with more than 100 confirmations.\n"
             "                \"immature\"              Coinbase transactions "
             "received with 100 or fewer confirmations.\n"
             "                \"orphan\"                Orphaned coinbase "
             "transactions received.\n"
             "    \"amount\": x.xxx,          (numeric) The amount in " +
             CURRENCY_UNIT +
             ". This is negative for the 'send' category, and is positive\n"
             "                                         for all other "
             "categories\n"
             "    \"vout\" : n,               (numeric) the vout value\n"
             "    \"fee\": x.xxx,             (numeric) The amount of the fee "
             "in " +
             CURRENCY_UNIT +
             ". This is negative and only available for the 'send' category of "
             "transactions.\n" +
             TransactionDescriptionString() +
             "    \"abandoned\": xxx,         (bool) 'true' if the transaction "
             "has been abandoned (inputs are respendable). Only available for "
             "the 'send' category of transactions.\n"
             "    \"comment\": \"...\",       (string) If a comment is "
             "associated with the transaction.\n"
             "    \"label\" : \"label\"       (string) A comment for the "
             "address/transaction, if any\n"
             "    \"to\": \"...\",            (string) If a comment to is "
             "associated with the transaction.\n"
             "  ],\n"
             "  \"removed\": [\n"
             "    <structure is the same as \"transactions\" above, only "
             "present if include_removed=true>\n"
             "    Note: transactions that were re-added in the active chain "
             "will appear as-is in this array, and may thus have a positive "
             "confirmation count.\n"
             "  ],\n"
             "  \"lastblock\": \"lastblockhash\"     (string) The hash of the "
             "block (target_confirmations-1) from the best block on the main "
             "chain. This is typically used to feed back into listsinceblock "
             "the next time you call it. So you would generally use a "
             "target_confirmations of say 6, so you will be continually "
             "re-notified of transactions until they've reached 6 confirmations "
             "plus any new ones\n"
             "}\n"},
         RPCExamples{HelpExampleCli("listsinceblock", "") +
                     HelpExampleCli("listsinceblock",
                                    "\"000000000000000bacf66f7497b7dc45ef753ee9a"
                                    "7d38571037cdb1a57f663ad\" 6") +
                     HelpExampleRpc("listsinceblock",
                                    "\"000000000000000bacf66f7497b7dc45ef753ee9a"
                                    "7d38571037cdb1a57f663ad\", 6")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     // Height of the specified block, or of the common ancestor if the
     // provided block was in a deactivated chain. The MakeOptional
     // initialization works around GCC bug #47679, present since GCC 4.6.0.
     Optional<int> height = MakeOptional(false, int());
 
     // Height of the specified block, even if it's in a deactivated chain.
     Optional<int> altheight;
     int target_confirms = 1;
     isminefilter filter = ISMINE_SPENDABLE;
 
     BlockHash blockId;
     if (!request.params[0].isNull() && !request.params[0].get_str().empty()) {
         blockId = BlockHash(ParseHashV(request.params[0], "blockhash"));
         height.emplace();
         altheight.emplace();
         if (!pwallet->chain().findCommonAncestor(
                 blockId, pwallet->GetLastBlockHash(),
                 /* ancestor out */ FoundBlock().height(*height),
                 /* blockId out */ FoundBlock().height(*altheight))) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
         }
     }
 
     if (!request.params[1].isNull()) {
         target_confirms = request.params[1].get_int();
 
         if (target_confirms < 1) {
             throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter");
         }
     }
 
     if (ParseIncludeWatchonly(request.params[2], *pwallet)) {
         filter |= ISMINE_WATCH_ONLY;
     }
 
     bool include_removed =
         (request.params[3].isNull() || request.params[3].get_bool());
 
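     // 'depth' counts the blocks from the given height up to and including
     // the tip. Transactions confirmed in blocks at or below that height
     // already have at least 'depth' confirmations and are skipped below,
     // since the caller has already seen them. Without a blockhash, depth
     // stays at -1 and every wallet transaction is listed.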
     int depth = height ? pwallet->GetLastBlockHeight() + 1 - *height : -1;
 
     UniValue transactions(UniValue::VARR);
 
     for (const std::pair<const TxId, CWalletTx> &pairWtx : pwallet->mapWallet) {
         CWalletTx tx = pairWtx.second;
 
         if (depth == -1 || tx.GetDepthInMainChain() < depth) {
             ListTransactions(pwallet, tx, 0, true, transactions, filter,
                              nullptr /* filter_label */);
         }
     }
 
     // when a reorg'd block is requested, we also list any relevant transactions
     // in the blocks of the chain that was detached
     UniValue removed(UniValue::VARR);
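     // Walk from the requested block back along hashPrevBlock through the
     // detached chain until the fork point (*height) is reached.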
     while (include_removed && altheight && *altheight > *height) {
         CBlock block;
         if (!pwallet->chain().findBlock(blockId, FoundBlock().data(block)) ||
             block.IsNull()) {
             throw JSONRPCError(RPC_INTERNAL_ERROR,
                                "Can't read block from disk");
         }
         for (const CTransactionRef &tx : block.vtx) {
             auto it = pwallet->mapWallet.find(tx->GetId());
             if (it != pwallet->mapWallet.end()) {
                 // We want all transactions regardless of confirmation count to
                 // appear here, even negative confirmation ones, hence the big
                 // negative.
                 ListTransactions(pwallet, it->second, -100000000, true, removed,
                                  filter, nullptr /* filter_label */);
             }
         }
         blockId = block.hashPrevBlock;
         --*altheight;
     }
 
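     // The block at height (tip height + 1 - target_confirms) is the most
     // recent block that already has target_confirms confirmations; its hash
     // is reported as "lastblock".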
     BlockHash lastblock;
     CHECK_NONFATAL(pwallet->chain().findAncestorByHeight(
         pwallet->GetLastBlockHash(),
         pwallet->GetLastBlockHeight() + 1 - target_confirms,
         FoundBlock().hash(lastblock)));
 
     UniValue ret(UniValue::VOBJ);
     ret.pushKV("transactions", transactions);
     if (include_removed) {
         ret.pushKV("removed", removed);
     }
     ret.pushKV("lastblock", lastblock.GetHex());
 
     return ret;
 }
 
 static UniValue gettransaction(const Config &config,
                                const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "gettransaction",
         "Get detailed information about in-wallet transaction <txid>\n",
         {
             {"txid", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The transaction id"},
             {"include_watchonly", RPCArg::Type::BOOL,
              /* default */ "true for watch-only wallets, otherwise false",
              "Whether to include watch-only addresses in balance calculation "
              "and details[]"},
             {"verbose", RPCArg::Type::BOOL, /* default */ "false",
              "Whether to include a `decoded` field containing the decoded "
              "transaction (equivalent to RPC decoderawtransaction)"},
         },
         RPCResult{
             "{\n"
             "    \"amount\" : x.xxx,        (numeric) The transaction amount "
             "in " +
             CURRENCY_UNIT +
             "\n"
             "    \"fee\": x.xxx,            (numeric) The amount of the fee "
             "in " +
             CURRENCY_UNIT +
             ". This is negative and only available for the \n"
             "                              'send' category of transactions.\n" +
             TransactionDescriptionString() +
             +"    \"details\" : [\n"
              "      {\n"
              "        \"involvesWatchonly\": xxx,         (bool) Only returns "
              "true if imported addresses were involved in transaction.\n"
              "        \"address\" : \"address\",          (string) The bitcoin "
              "address involved in the transaction\n"
              "        \"category\" :                      (string) The "
              "transaction category.\n"
              "                     \"send\"                  Transactions "
              "sent.\n"
              "                     \"receive\"               Non-coinbase "
              "transactions received.\n"
              "                     \"generate\"              Coinbase "
              "transactions received with more than 100 confirmations.\n"
              "                     \"immature\"              Coinbase "
              "transactions received with 100 or fewer confirmations.\n"
              "                     \"orphan\"                Orphaned coinbase "
              "transactions received.\n"
              "        \"amount\" : x.xxx,                 (numeric) The amount "
              "in " +
             CURRENCY_UNIT +
             "\n"
             "        \"label\" : \"label\",              (string) A comment "
             "for the address/transaction, if any\n"
             "        \"vout\" : n,                       (numeric) the vout "
             "value\n"
             "        \"fee\": x.xxx,                     (numeric) The amount "
             "of the fee in " +
             CURRENCY_UNIT +
             ". This is negative and only available for the \n"
             "                                           'send' category of "
             "transactions.\n"
             "        \"abandoned\": xxx                  (bool) 'true' if the "
             "transaction has been abandoned (inputs are respendable). Only "
             "available for the \n"
             "                                           'send' category of "
             "transactions.\n"
             "      }\n"
             "      ,...\n"
             "    ],\n"
             "    \"hex\" : \"data\"         (string) Raw data for transaction\n"
             "    \"decoded\" : transaction         (json object) Optional, the "
             "decoded transaction (only present when `verbose` is passed), "
             "equivalent to the\n"
             "                                                  RPC "
             "decoderawtransaction method, or the RPC getrawtransaction method "
             "when `verbose` is passed.\n"
             "}\n"},
         RPCExamples{HelpExampleCli("gettransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\"") +
                     HelpExampleCli("gettransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\" true") +
                     HelpExampleCli("gettransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\" false true") +
                     HelpExampleRpc("gettransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     TxId txid(ParseHashV(request.params[0], "txid"));
 
     isminefilter filter = ISMINE_SPENDABLE;
     if (ParseIncludeWatchonly(request.params[1], *pwallet)) {
         filter |= ISMINE_WATCH_ONLY;
     }
 
     bool verbose =
         request.params[2].isNull() ? false : request.params[2].get_bool();
 
     UniValue entry(UniValue::VOBJ);
     auto it = pwallet->mapWallet.find(txid);
     if (it == pwallet->mapWallet.end()) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Invalid or non-wallet transaction id");
     }
     const CWalletTx &wtx = it->second;
 
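     // nDebit already includes the fee, so for wallet-originated
     // transactions GetValueOut() - nDebit yields the fee as a negative
     // amount, and nNet - nFee reduces to nCredit - GetValueOut(), i.e. the
     // net amount excluding the fee.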
     Amount nCredit = wtx.GetCredit(filter);
     Amount nDebit = wtx.GetDebit(filter);
     Amount nNet = nCredit - nDebit;
     Amount nFee = (wtx.IsFromMe(filter) ? wtx.tx->GetValueOut() - nDebit
                                         : Amount::zero());
 
     entry.pushKV("amount", ValueFromAmount(nNet - nFee));
     if (wtx.IsFromMe(filter)) {
         entry.pushKV("fee", ValueFromAmount(nFee));
     }
 
     WalletTxToJSON(pwallet->chain(), wtx, entry);
 
     UniValue details(UniValue::VARR);
     ListTransactions(pwallet, wtx, 0, false, details, filter,
                      nullptr /* filter_label */);
     entry.pushKV("details", details);
 
     std::string strHex =
         EncodeHexTx(*wtx.tx, pwallet->chain().rpcSerializationFlags());
     entry.pushKV("hex", strHex);
 
     if (verbose) {
         UniValue decoded(UniValue::VOBJ);
         TxToUniv(*wtx.tx, uint256(), decoded, false);
         entry.pushKV("decoded", decoded);
     }
 
     return entry;
 }
 
 static UniValue abandontransaction(const Config &config,
                                    const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "abandontransaction",
         "Mark in-wallet transaction <txid> as abandoned\n"
         "This will mark this transaction and all its in-wallet descendants as "
         "abandoned which will allow\n"
         "for their inputs to be respent.  It can be used to replace \"stuck\" "
         "or evicted transactions.\n"
         "It only works on transactions which are not included in a block and "
         "are not currently in the mempool.\n"
         "It has no effect on transactions which are already abandoned.\n",
         {
             {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The transaction id"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("abandontransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\"") +
                     HelpExampleRpc("abandontransaction",
                                    "\"1075db55d416d3ca199f55b6084e2115b9345e16c"
                                    "5cf302fc80e9d5fbf5d48d\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     TxId txid(ParseHashV(request.params[0], "txid"));
 
     if (!pwallet->mapWallet.count(txid)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Invalid or non-wallet transaction id");
     }
 
     if (!pwallet->AbandonTransaction(txid)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                            "Transaction not eligible for abandonment");
     }
 
     return NullUniValue;
 }
 
 static UniValue backupwallet(const Config &config,
                              const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "backupwallet",
         "Safely copies current wallet file to destination, which can be a "
         "directory or a path with filename.\n",
         {
             {"destination", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The destination directory or file"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("backupwallet", "\"backup.dat\"") +
                     HelpExampleRpc("backupwallet", "\"backup.dat\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     std::string strDest = request.params[0].get_str();
     if (!pwallet->BackupWallet(strDest)) {
         throw JSONRPCError(RPC_WALLET_ERROR, "Error: Wallet backup failed!");
     }
 
     return NullUniValue;
 }
 
 static UniValue keypoolrefill(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "keypoolrefill",
         "Fills the keypool." + HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"newsize", RPCArg::Type::NUM, /* default */ "100",
              "The new keypool size"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("keypoolrefill", "") +
                     HelpExampleRpc("keypoolrefill", "")},
     }
         .Check(request);
 
     if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
         throw JSONRPCError(RPC_WALLET_ERROR,
                            "Error: Private keys are disabled for this wallet");
     }
 
     LOCK(pwallet->cs_wallet);
 
     // 0 is interpreted by TopUpKeyPool() as the default keypool size given by
     // -keypool
     unsigned int kpSize = 0;
     if (!request.params[0].isNull()) {
         if (request.params[0].get_int() < 0) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, expected valid size.");
         }
         kpSize = (unsigned int)request.params[0].get_int();
     }
 
     EnsureWalletIsUnlocked(pwallet);
     pwallet->TopUpKeyPool(kpSize);
 
     if (pwallet->GetKeyPoolSize() < kpSize) {
         throw JSONRPCError(RPC_WALLET_ERROR, "Error refreshing keypool.");
     }
 
     return NullUniValue;
 }
 
 static UniValue walletpassphrase(const Config &config,
                                  const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "walletpassphrase",
         "Stores the wallet decryption key in memory for 'timeout' seconds.\n"
         "This is needed prior to performing transactions related to private "
         "keys such as sending bitcoins\n"
         "\nNote:\n"
         "Issuing the walletpassphrase command while the wallet is already "
         "unlocked will set a new unlock\n"
         "time that overrides the old one.\n",
         {
             {"passphrase", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The wallet passphrase"},
             {"timeout", RPCArg::Type::NUM, RPCArg::Optional::NO,
              "The time to keep the decryption key in seconds; capped at "
              "100000000 (~3 years)."},
         },
         RPCResults{},
         RPCExamples{
             "\nUnlock the wallet for 60 seconds\n" +
             HelpExampleCli("walletpassphrase", "\"my pass phrase\" 60") +
             "\nLock the wallet again (before 60 seconds)\n" +
             HelpExampleCli("walletlock", "") + "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("walletpassphrase", "\"my pass phrase\", 60")},
     }
         .Check(request);
 
     int64_t nSleepTime;
     {
         LOCK(pwallet->cs_wallet);
 
         if (!pwallet->IsCrypted()) {
             throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE,
                                "Error: running with an unencrypted wallet, but "
                                "walletpassphrase was called.");
         }
 
         // Note that the walletpassphrase is stored in request.params[0] which
         // is not mlock()ed
         SecureString strWalletPass;
         strWalletPass.reserve(100);
         // TODO: get rid of this .c_str() by implementing
         // SecureString::operator=(std::string)
         // Alternately, find a way to make request.params[0] mlock()'d to begin
         // with.
         strWalletPass = request.params[0].get_str().c_str();
 
         // Get the timeout
         nSleepTime = request.params[1].get_int64();
         // Timeout cannot be negative, otherwise it will relock immediately
         if (nSleepTime < 0) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Timeout cannot be negative.");
         }
         // Clamp timeout
         // larger values trigger a macos/libevent bug?
         constexpr int64_t MAX_SLEEP_TIME = 100000000;
         if (nSleepTime > MAX_SLEEP_TIME) {
             nSleepTime = MAX_SLEEP_TIME;
         }
 
         if (strWalletPass.empty()) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "passphrase can not be empty");
         }
 
         if (!pwallet->Unlock(strWalletPass)) {
             throw JSONRPCError(
                 RPC_WALLET_PASSPHRASE_INCORRECT,
                 "Error: The wallet passphrase entered was incorrect.");
         }
 
         pwallet->TopUpKeyPool();
 
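         // Record when the wallet will relock; getwalletinfo reports this
         // timestamp as "unlocked_until".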
         pwallet->nRelockTime = GetTime() + nSleepTime;
     }
 
     // rpcRunLater must be called without cs_wallet held otherwise a deadlock
     // can occur. The deadlock would happen when RPCRunLater removes the
     // previous timer (and waits for the callback to finish if already running)
     // and the callback locks cs_wallet.
     AssertLockNotHeld(wallet->cs_wallet);
     // Keep a weak pointer to the wallet so that it is possible to unload the
     // wallet before the following callback is called. If a valid shared pointer
     // is acquired in the callback then the wallet is still loaded.
     std::weak_ptr<CWallet> weak_wallet = wallet;
     pwallet->chain().rpcRunLater(
         strprintf("lockwallet(%s)", pwallet->GetName()),
         [weak_wallet] {
             if (auto shared_wallet = weak_wallet.lock()) {
                 LOCK(shared_wallet->cs_wallet);
                 shared_wallet->Lock();
                 shared_wallet->nRelockTime = 0;
             }
         },
         nSleepTime);
 
     return NullUniValue;
 }
 
 static UniValue walletpassphrasechange(const Config &config,
                                        const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "walletpassphrasechange",
         "Changes the wallet passphrase from 'oldpassphrase' to "
         "'newpassphrase'.\n",
         {
             {"oldpassphrase", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The current passphrase"},
             {"newpassphrase", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The new passphrase"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("walletpassphrasechange",
                                    "\"old one\" \"new one\"") +
                     HelpExampleRpc("walletpassphrasechange",
                                    "\"old one\", \"new one\"")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     if (!pwallet->IsCrypted()) {
         throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE,
                            "Error: running with an unencrypted wallet, but "
                            "walletpassphrasechange was called.");
     }
 
     // TODO: get rid of these .c_str() calls by implementing
     // SecureString::operator=(std::string)
     // Alternately, find a way to make request.params[0] mlock()'d to begin
     // with.
     SecureString strOldWalletPass;
     strOldWalletPass.reserve(100);
     strOldWalletPass = request.params[0].get_str().c_str();
 
     SecureString strNewWalletPass;
     strNewWalletPass.reserve(100);
     strNewWalletPass = request.params[1].get_str().c_str();
 
     if (strOldWalletPass.empty() || strNewWalletPass.empty()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "passphrase can not be empty");
     }
 
     if (!pwallet->ChangeWalletPassphrase(strOldWalletPass, strNewWalletPass)) {
         throw JSONRPCError(
             RPC_WALLET_PASSPHRASE_INCORRECT,
             "Error: The wallet passphrase entered was incorrect.");
     }
 
     return NullUniValue;
 }
 
 static UniValue walletlock(const Config &config,
                            const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "walletlock",
         "Removes the wallet encryption key from memory, locking the wallet.\n"
         "After calling this method, you will need to call walletpassphrase "
         "again\n"
         "before being able to call any methods which require the wallet to be "
         "unlocked.\n",
         {},
         RPCResults{},
         RPCExamples{
             "\nSet the passphrase for 2 minutes to perform a transaction\n" +
             HelpExampleCli("walletpassphrase", "\"my pass phrase\" 120") +
             "\nPerform a send (requires passphrase set)\n" +
             HelpExampleCli("sendtoaddress",
                            "\"1M72Sfpbz1BPpXFHz9m3CdqATR44Jvaydd\" 1.0") +
             "\nClear the passphrase since we are done before 2 minutes is "
             "up\n" +
             HelpExampleCli("walletlock", "") + "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("walletlock", "")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     if (!pwallet->IsCrypted()) {
         throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE,
                            "Error: running with an unencrypted wallet, but "
                            "walletlock was called.");
     }
 
     pwallet->Lock();
     pwallet->nRelockTime = 0;
 
     return NullUniValue;
 }
 
 static UniValue encryptwallet(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "encryptwallet",
         "Encrypts the wallet with 'passphrase'. This is for first time "
         "encryption.\n"
         "After this, any calls that interact with private keys such as sending "
         "or signing \n"
         "will require the passphrase to be set prior the making these calls.\n"
         "Use the walletpassphrase call for this, and then walletlock call.\n"
         "If the wallet is already encrypted, use the walletpassphrasechange "
         "call.\n",
         {
             {"passphrase", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The pass phrase to encrypt the wallet with. It must be at least "
              "1 character, but should be long."},
         },
         RPCResults{},
         RPCExamples{
             "\nEncrypt your wallet\n" +
             HelpExampleCli("encryptwallet", "\"my pass phrase\"") +
             "\nNow set the passphrase to use the wallet, such as for signing "
             "or sending bitcoin\n" +
             HelpExampleCli("walletpassphrase", "\"my pass phrase\"") +
             "\nNow we can do something like sign\n" +
             HelpExampleCli("signmessage", "\"address\" \"test message\"") +
             "\nNow lock the wallet again by removing the passphrase\n" +
             HelpExampleCli("walletlock", "") + "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("encryptwallet", "\"my pass phrase\"")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
         throw JSONRPCError(
             RPC_WALLET_ENCRYPTION_FAILED,
             "Error: wallet does not contain private keys, nothing to encrypt.");
     }
 
     if (pwallet->IsCrypted()) {
         throw JSONRPCError(RPC_WALLET_WRONG_ENC_STATE,
                            "Error: running with an encrypted wallet, but "
                            "encryptwallet was called.");
     }
 
     // TODO: get rid of this .c_str() by implementing
     // SecureString::operator=(std::string)
     // Alternately, find a way to make request.params[0] mlock()'d to begin
     // with.
     SecureString strWalletPass;
     strWalletPass.reserve(100);
     strWalletPass = request.params[0].get_str().c_str();
 
     if (strWalletPass.empty()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "passphrase can not be empty");
     }
 
     if (!pwallet->EncryptWallet(strWalletPass)) {
         throw JSONRPCError(RPC_WALLET_ENCRYPTION_FAILED,
                            "Error: Failed to encrypt the wallet.");
     }
 
     return "wallet encrypted; The keypool has been flushed and a new HD seed "
            "was generated (if you are using HD). You need to make a new "
            "backup.";
 }
 
 static UniValue lockunspent(const Config &config,
                             const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "lockunspent",
         "Updates list of temporarily unspendable outputs.\n"
         "Temporarily lock (unlock=false) or unlock (unlock=true) specified "
         "transaction outputs.\n"
         "If no transaction outputs are specified when unlocking then all "
         "current locked transaction outputs are unlocked.\n"
         "A locked transaction output will not be chosen by automatic coin "
         "selection, when spending bitcoins.\n"
         "Locks are stored in memory only. Nodes start with zero locked "
         "outputs, and the locked output list\n"
         "is always cleared (by virtue of process exit) when a node stops or "
         "fails.\n"
         "Also see the listunspent call\n",
         {
             {"unlock", RPCArg::Type::BOOL, RPCArg::Optional::NO,
              "Whether to unlock (true) or lock (false) the specified "
              "transactions"},
             {
                 "transactions",
                 RPCArg::Type::ARR,
                 /* default */ "empty array",
                 "A json array of objects. Each object the txid (string) vout "
                 "(numeric).",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                         },
                     },
                 },
             },
         },
         RPCResult{"true|false    (boolean) Whether the command was successful "
                   "or not\n"},
         RPCExamples{
             "\nList the unspent transactions\n" +
             HelpExampleCli("listunspent", "") +
             "\nLock an unspent transaction\n" +
             HelpExampleCli("lockunspent", "false "
                                           "\"[{\\\"txid\\\":"
                                           "\\\"a08e6907dbbd3d809776dbfc5d82e371"
                                           "b764ed838b5655e72f463568df1aadf0\\\""
                                           ",\\\"vout\\\":1}]\"") +
             "\nList the locked transactions\n" +
             HelpExampleCli("listlockunspent", "") +
             "\nUnlock the transaction again\n" +
             HelpExampleCli("lockunspent", "true "
                                           "\"[{\\\"txid\\\":"
                                           "\\\"a08e6907dbbd3d809776dbfc5d82e371"
                                           "b764ed838b5655e72f463568df1aadf0\\\""
                                           ",\\\"vout\\\":1}]\"") +
             "\nAs a JSON-RPC call\n" +
             HelpExampleRpc("lockunspent", "false, "
                                           "\"[{\\\"txid\\\":"
                                           "\\\"a08e6907dbbd3d809776dbfc5d82e371"
                                           "b764ed838b5655e72f463568df1aadf0\\\""
                                           ",\\\"vout\\\":1}]\"")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     RPCTypeCheckArgument(request.params[0], UniValue::VBOOL);
 
     bool fUnlock = request.params[0].get_bool();
 
     if (request.params[1].isNull()) {
         if (fUnlock) {
             pwallet->UnlockAllCoins();
         }
         return true;
     }
 
     RPCTypeCheckArgument(request.params[1], UniValue::VARR);
 
     const UniValue &output_params = request.params[1];
 
     // Create and validate the COutPoints first.
 
     std::vector<COutPoint> outputs;
     outputs.reserve(output_params.size());
 
     for (size_t idx = 0; idx < output_params.size(); idx++) {
         const UniValue &o = output_params[idx].get_obj();
 
         RPCTypeCheckObj(o, {
                                {"txid", UniValueType(UniValue::VSTR)},
                                {"vout", UniValueType(UniValue::VNUM)},
                            });
 
         const int nOutput = find_value(o, "vout").get_int();
         if (nOutput < 0) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, vout must be positive");
         }
 
         const TxId txid(ParseHashO(o, "txid"));
         const auto it = pwallet->mapWallet.find(txid);
         if (it == pwallet->mapWallet.end()) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, unknown transaction");
         }
 
         const COutPoint output(txid, nOutput);
         const CWalletTx &trans = it->second;
         if (output.GetN() >= trans.tx->vout.size()) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, vout index out of bounds");
         }
 
         if (pwallet->IsSpent(output)) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, expected unspent output");
         }
 
         const bool is_locked = pwallet->IsLockedCoin(output);
         if (fUnlock && !is_locked) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, expected locked output");
         }
 
         if (!fUnlock && is_locked) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Invalid parameter, output already locked");
         }
 
         outputs.push_back(output);
     }
 
     // Atomically set (un)locked status for the outputs.
     for (const COutPoint &output : outputs) {
         if (fUnlock) {
             pwallet->UnlockCoin(output);
         } else {
             pwallet->LockCoin(output);
         }
     }
 
     return true;
 }
 
 static UniValue listlockunspent(const Config &config,
                                 const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listlockunspent",
         "Returns list of temporarily unspendable outputs.\n"
         "See the lockunspent call to lock and unlock transactions for "
         "spending.\n",
         {},
         RPCResult{
             "[\n"
             "  {\n"
             "    \"txid\" : \"transactionid\",     (string) The transaction id "
             "locked\n"
             "    \"vout\" : n                      (numeric) The vout value\n"
             "  }\n"
             "  ,...\n"
             "]\n"},
         RPCExamples{
             "\nList the unspent transactions\n" +
             HelpExampleCli("listunspent", "") +
             "\nLock an unspent transaction\n" +
             HelpExampleCli("lockunspent", "false "
                                           "\"[{\\\"txid\\\":"
                                           "\\\"a08e6907dbbd3d809776dbfc5d82e371"
                                           "b764ed838b5655e72f463568df1aadf0\\\""
                                           ",\\\"vout\\\":1}]\"") +
             "\nList the locked transactions\n" +
             HelpExampleCli("listlockunspent", "") +
             "\nUnlock the transaction again\n" +
             HelpExampleCli("lockunspent", "true "
                                           "\"[{\\\"txid\\\":"
                                           "\\\"a08e6907dbbd3d809776dbfc5d82e371"
                                           "b764ed838b5655e72f463568df1aadf0\\\""
                                           ",\\\"vout\\\":1}]\"") +
             "\nAs a JSON-RPC call\n" + HelpExampleRpc("listlockunspent", "")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     std::vector<COutPoint> vOutpts;
     pwallet->ListLockedCoins(vOutpts);
 
     UniValue ret(UniValue::VARR);
 
     for (const COutPoint &output : vOutpts) {
         UniValue o(UniValue::VOBJ);
 
         o.pushKV("txid", output.GetTxId().GetHex());
         o.pushKV("vout", int(output.GetN()));
         ret.push_back(o);
     }
 
     return ret;
 }
 
 static UniValue settxfee(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "settxfee",
         "Set the transaction fee per kB for this wallet. Overrides the "
         "global -paytxfee command line parameter.\n",
         {
             {"amount", RPCArg::Type::AMOUNT, RPCArg::Optional::NO,
              "The transaction fee in " + CURRENCY_UNIT + "/kB"},
         },
         RPCResult{"true|false        (boolean) Returns true if successful\n"},
         RPCExamples{HelpExampleCli("settxfee", "0.00001") +
                     HelpExampleRpc("settxfee", "0.00001")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
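     // The amount is a fee per kB, so it is paired with a size of 1000
     // bytes to construct the wallet's CFeeRate.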
     Amount nAmount = AmountFromValue(request.params[0]);
     CFeeRate tx_fee_rate(nAmount, 1000);
     if (tx_fee_rate == CFeeRate()) {
         // automatic selection
     } else if (tx_fee_rate < pwallet->chain().relayMinFee()) {
         throw JSONRPCError(
             RPC_INVALID_PARAMETER,
             strprintf("txfee cannot be less than min relay tx fee (%s)",
                       pwallet->chain().relayMinFee().ToString()));
     } else if (tx_fee_rate < pwallet->m_min_fee) {
         throw JSONRPCError(
             RPC_INVALID_PARAMETER,
             strprintf("txfee cannot be less than wallet min fee (%s)",
                       pwallet->m_min_fee.ToString()));
     }
 
     pwallet->m_pay_tx_fee = tx_fee_rate;
     return true;
 }
 
 static UniValue getbalances(const Config &config,
                             const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const rpc_wallet =
         GetWalletForJSONRPCRequest(request);
     if (!EnsureWalletIsAvailable(rpc_wallet.get(), request.fHelp)) {
         return NullUniValue;
     }
     CWallet &wallet = *rpc_wallet;
 
     RPCHelpMan{
         "getbalances",
         "Returns an object with all balances in " + CURRENCY_UNIT + ".\n",
         {},
         RPCResult{
             "{\n"
             "    \"mine\": {                        (object) balances from "
             "outputs that the wallet can sign\n"
             "      \"trusted\": xxx                 (numeric) trusted balance "
             "(outputs created by the wallet or confirmed outputs)\n"
             "      \"untrusted_pending\": xxx       (numeric) untrusted "
             "pending balance (outputs created by others that are in the "
             "mempool)\n"
             "      \"immature\": xxx                (numeric) balance from "
             "immature coinbase outputs\n"
             "      \"used\": xxx                    (numeric) (only present if "
             "avoid_reuse is set) balance from coins sent to addresses that "
             "were previously spent from (potentially privacy violating)\n"
             "    },\n"
             "    \"watchonly\": {                   (object) watchonly "
             "balances (not present if wallet does not watch anything)\n"
             "      \"trusted\": xxx                 (numeric) trusted balance "
             "(outputs created by the wallet or confirmed outputs)\n"
             "      \"untrusted_pending\": xxx       (numeric) untrusted "
             "pending balance (outputs created by others that are in the "
             "mempool)\n"
             "      \"immature\": xxx                (numeric) balance from "
             "immature coinbase outputs\n"
             "    },\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getbalances", "") +
                     HelpExampleRpc("getbalances", "")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     wallet.BlockUntilSyncedToCurrentChain();
 
     LOCK(wallet.cs_wallet);
 
     UniValue obj(UniValue::VOBJ);
 
     const auto bal = wallet.GetBalance();
     UniValue balances{UniValue::VOBJ};
     {
         UniValue balances_mine{UniValue::VOBJ};
         balances_mine.pushKV("trusted", ValueFromAmount(bal.m_mine_trusted));
         balances_mine.pushKV("untrusted_pending",
                              ValueFromAmount(bal.m_mine_untrusted_pending));
         balances_mine.pushKV("immature", ValueFromAmount(bal.m_mine_immature));
         if (wallet.IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE)) {
             // If the AVOID_REUSE flag is set, bal has been set to just the
             // un-reused address balance. Get the total balance, and then
             // subtract bal to get the reused address balance.
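             // E.g. full balances of 5.0 trusted and 1.0 pending against
             // non-reused balances of 4.2 and 0.8 report "used" as
             // (5.0 + 1.0) - (4.2 + 0.8) = 1.0.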
             const auto full_bal = wallet.GetBalance(0, false);
             balances_mine.pushKV(
                 "used", ValueFromAmount(full_bal.m_mine_trusted +
                                         full_bal.m_mine_untrusted_pending -
                                         bal.m_mine_trusted -
                                         bal.m_mine_untrusted_pending));
         }
         balances.pushKV("mine", balances_mine);
     }
     auto spk_man = wallet.GetLegacyScriptPubKeyMan();
     if (spk_man && spk_man->HaveWatchOnly()) {
         UniValue balances_watchonly{UniValue::VOBJ};
         balances_watchonly.pushKV("trusted",
                                   ValueFromAmount(bal.m_watchonly_trusted));
         balances_watchonly.pushKV(
             "untrusted_pending",
             ValueFromAmount(bal.m_watchonly_untrusted_pending));
         balances_watchonly.pushKV("immature",
                                   ValueFromAmount(bal.m_watchonly_immature));
         balances.pushKV("watchonly", balances_watchonly);
     }
     return balances;
 }
 
 static UniValue getwalletinfo(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getwalletinfo",
         "Returns an object containing various wallet state info.\n",
         {},
         RPCResult{
             "{\n"
             "  \"walletname\": xxxxx,             (string) the wallet name\n"
             "  \"walletversion\": xxxxx,          (numeric) the wallet "
             "version\n"
             "  \"balance\": xxxxxxx,              (numeric) DEPRECATED. "
             "Identical to getbalances().mine.trusted\n"
             "  \"unconfirmed_balance\": xxx,      (numeric) DEPRECATED. "
             "Identical to getbalances().mine.untrusted_pending\n"
             "  \"immature_balance\": xxxxxx,      (numeric) DEPRECATED. "
             "Identical to getbalances().mine.immature\n"
             "  \"txcount\": xxxxxxx,              (numeric) the total number "
             "of transactions in the wallet\n"
             "  \"keypoololdest\": xxxxxx,           (numeric) the " +
             UNIX_EPOCH_TIME +
             " of the oldest pre-generated key in the key pool\n"
             "  \"keypoolsize\": xxxx,             (numeric) how many new keys "
             "are pre-generated (only counts external keys)\n"
             "  \"keypoolsize_hd_internal\": xxxx, (numeric) how many new keys "
             "are pre-generated for internal use (used for change outputs, only "
             "appears if the wallet is using this feature, otherwise external "
             "keys are used)\n"
             "  \"unlocked_until\": ttt,             (numeric) the " +
             UNIX_EPOCH_TIME +
             " until which the wallet is unlocked for transfers, or 0 if the "
             "wallet is locked\n"
             "  \"paytxfee\": x.xxxx,              (numeric) the transaction "
             "fee configuration, set in " +
             CURRENCY_UNIT +
             "/kB\n"
             "  \"hdseedid\": \"<hash160>\"          (string, optional) the "
             "Hash160 of the HD seed (only present when HD is enabled)\n"
             "  \"private_keys_enabled\": true|false (boolean) false if "
             "privatekeys are disabled for this wallet (enforced watch-only "
             "wallet)\n"
             "  \"scanning\":                        (json object) current "
             "scanning details, or false if no scan is in progress\n"
             "    {\n"
             "      \"duration\" : xxxx              (numeric) elapsed seconds "
             "since scan start\n"
             "      \"progress\" : x.xxxx,           (numeric) scanning "
             "progress percentage [0.0, 1.0]\n"
             "    }\n"
             "  \"avoid_reuse\": true|false          (boolean) whether this "
             "wallet tracks clean/dirty coins in terms of reuse\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getwalletinfo", "") +
                     HelpExampleRpc("getwalletinfo", "")},
     }
         .Check(request);
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     LOCK(pwallet->cs_wallet);
 
     UniValue obj(UniValue::VOBJ);
 
     size_t kpExternalSize = pwallet->KeypoolCountExternalKeys();
     const auto bal = pwallet->GetBalance();
     obj.pushKV("walletname", pwallet->GetName());
     obj.pushKV("walletversion", pwallet->GetVersion());
     obj.pushKV("balance", ValueFromAmount(bal.m_mine_trusted));
     obj.pushKV("unconfirmed_balance",
                ValueFromAmount(bal.m_mine_untrusted_pending));
     obj.pushKV("immature_balance", ValueFromAmount(bal.m_mine_immature));
     obj.pushKV("txcount", (int)pwallet->mapWallet.size());
     obj.pushKV("keypoololdest", pwallet->GetOldestKeyPoolTime());
     obj.pushKV("keypoolsize", (int64_t)kpExternalSize);
 
     LegacyScriptPubKeyMan *spk_man = pwallet->GetLegacyScriptPubKeyMan();
     if (spk_man) {
         CKeyID seed_id = spk_man->GetHDChain().seed_id;
         if (!seed_id.IsNull()) {
             obj.pushKV("hdseedid", seed_id.GetHex());
         }
     }
 
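     // The internal (change) keypool size is the total keypool size minus
     // the external keys counted above; it is only reported when the wallet
     // supports split HD keypools.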
     if (pwallet->CanSupportFeature(FEATURE_HD_SPLIT)) {
         obj.pushKV("keypoolsize_hd_internal",
                    int64_t(pwallet->GetKeyPoolSize() - kpExternalSize));
     }
     if (pwallet->IsCrypted()) {
         obj.pushKV("unlocked_until", pwallet->nRelockTime);
     }
     obj.pushKV("paytxfee", ValueFromAmount(pwallet->m_pay_tx_fee.GetFeePerK()));
     obj.pushKV("private_keys_enabled",
                !pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS));
     if (pwallet->IsScanning()) {
         UniValue scanning(UniValue::VOBJ);
         scanning.pushKV("duration", pwallet->ScanningDuration() / 1000);
         scanning.pushKV("progress", pwallet->ScanningProgress());
         obj.pushKV("scanning", scanning);
     } else {
         obj.pushKV("scanning", false);
     }
     obj.pushKV("avoid_reuse",
                pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE));
     return obj;
 }
 
 static UniValue listwalletdir(const Config &config,
                               const JSONRPCRequest &request) {
     RPCHelpMan{
         "listwalletdir",
         "Returns a list of wallets in the wallet directory.\n",
         {},
         RPCResult{
             "{\n"
             "  \"wallets\" : [                (json array of objects)\n"
             "    {\n"
             "      \"name\" : \"name\"          (string) The wallet name\n"
             "    }\n"
             "    ,...\n"
             "  ]\n"
             "}\n"},
         RPCExamples{HelpExampleCli("listwalletdir", "") +
                     HelpExampleRpc("listwalletdir", "")},
     }
         .Check(request);
 
     UniValue wallets(UniValue::VARR);
     for (const auto &path : ListWalletDir()) {
         UniValue wallet(UniValue::VOBJ);
         wallet.pushKV("name", path.string());
         wallets.push_back(wallet);
     }
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("wallets", wallets);
     return result;
 }
 
 static UniValue listwallets(const Config &config,
                             const JSONRPCRequest &request) {
     RPCHelpMan{
         "listwallets",
         "Returns a list of currently loaded wallets.\n"
         "For full information on the wallet, use \"getwalletinfo\"\n",
         {},
         RPCResult{"[                         (json array of strings)\n"
                   "  \"walletname\"            (string) the wallet name\n"
                   "   ...\n"
                   "]\n"},
         RPCExamples{HelpExampleCli("listwallets", "") +
                     HelpExampleRpc("listwallets", "")},
     }
         .Check(request);
 
     UniValue obj(UniValue::VARR);
 
     for (const std::shared_ptr<CWallet> &wallet : GetWallets()) {
         if (!EnsureWalletIsAvailable(wallet.get(), request.fHelp)) {
             return NullUniValue;
         }
 
         LOCK(wallet->cs_wallet);
 
         obj.push_back(wallet->GetName());
     }
 
     return obj;
 }
 
 static UniValue loadwallet(const Config &config,
                            const JSONRPCRequest &request) {
    RPCHelpMan{
        "loadwallet",
        "Loads a wallet from a wallet file or directory."
        "\nNote that all wallet command-line options used when starting "
        "bitcoind will be"
        "\napplied to the new wallet (e.g. -zapwallettxes, rescan, etc.).\n",
         {
             {"filename", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The wallet directory or .dat file."},
         },
         RPCResult{"{\n"
                   "  \"name\" :    <wallet_name>,        (string) The wallet "
                   "name if loaded successfully.\n"
                   "  \"warning\" : <warning>,            (string) Warning "
                   "message if wallet was not loaded cleanly.\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("loadwallet", "\"test.dat\"") +
                     HelpExampleRpc("loadwallet", "\"test.dat\"")},
     }
         .Check(request);
 
     const CChainParams &chainParams = config.GetChainParams();
 
     WalletLocation location(request.params[0].get_str());
 
     if (!location.Exists()) {
         throw JSONRPCError(RPC_WALLET_NOT_FOUND,
                            "Wallet " + location.GetName() + " not found.");
     } else if (fs::is_directory(location.GetPath())) {
         // The given filename is a directory. Check that there's a wallet.dat
         // file.
         fs::path wallet_dat_file = location.GetPath() / "wallet.dat";
         if (fs::symlink_status(wallet_dat_file).type() == fs::file_not_found) {
             throw JSONRPCError(RPC_WALLET_NOT_FOUND,
                                "Directory " + location.GetName() +
                                    " does not contain a wallet.dat file.");
         }
     }
 
     bilingual_str error;
     std::vector<bilingual_str> warnings;
     std::shared_ptr<CWallet> const wallet =
         LoadWallet(chainParams, *g_rpc_chain, location, error, warnings);
     if (!wallet) {
         throw JSONRPCError(RPC_WALLET_ERROR, error.original);
     }
 
     UniValue obj(UniValue::VOBJ);
     obj.pushKV("name", wallet->GetName());
     obj.pushKV("warning", Join(warnings, "\n", OpOriginal));
 
     return obj;
 }
 
 static UniValue setwalletflag(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
    std::string flags;
     for (auto &it : WALLET_FLAG_MAP) {
         if (it.second & MUTABLE_WALLET_FLAGS) {
            flags += (flags.empty() ? "" : ", ") + it.first;
         }
     }
     RPCHelpMan{
         "setwalletflag",
         "Change the state of the given wallet flag for a wallet.\n",
         {
             {"flag", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The name of the flag to change. Current available flags: " +
                  flags},
             {"value", RPCArg::Type::BOOL, /* default */ "true",
              "The new state."},
         },
         RPCResult{
             "{\n"
             "    \"flag_name\": string   (string) The name of the flag that "
             "was modified\n"
             "    \"flag_state\": bool    (bool) The new state of the flag\n"
             "    \"warnings\": string    (string) Any warnings associated with "
             "the change\n"
             "}\n"},
         RPCExamples{HelpExampleCli("setwalletflag", "avoid_reuse") +
                     HelpExampleRpc("setwalletflag", "\"avoid_reuse\"")},
     }
         .Check(request);
 
     std::string flag_str = request.params[0].get_str();
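    // The value argument defaults to true when omitted.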
     bool value = request.params[1].isNull() || request.params[1].get_bool();
 
     if (!WALLET_FLAG_MAP.count(flag_str)) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            strprintf("Unknown wallet flag: %s", flag_str));
     }
 
     auto flag = WALLET_FLAG_MAP.at(flag_str);
 
     if (!(flag & MUTABLE_WALLET_FLAGS)) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            strprintf("Wallet flag is immutable: %s", flag_str));
     }
 
     UniValue res(UniValue::VOBJ);
 
     if (pwallet->IsWalletFlagSet(flag) == value) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            strprintf("Wallet flag is already set to %s: %s",
                                      value ? "true" : "false", flag_str));
     }
 
     res.pushKV("flag_name", flag_str);
     res.pushKV("flag_state", value);
 
     if (value) {
         pwallet->SetWalletFlag(flag);
     } else {
         pwallet->UnsetWalletFlag(flag);
     }
 
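    // Surface the caveat associated with a flag as a warning when the flag
    // is being enabled.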
     if (flag && value && WALLET_FLAG_CAVEATS.count(flag)) {
         res.pushKV("warnings", WALLET_FLAG_CAVEATS.at(flag));
     }
 
     return res;
 }
 
 static UniValue createwallet(const Config &config,
                              const JSONRPCRequest &request) {
     RPCHelpMan{
         "createwallet",
         "Creates and loads a new wallet.\n",
         {
             {"wallet_name", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The name for the new wallet. If this is a path, the wallet will "
              "be created at the path location."},
            {"disable_private_keys", RPCArg::Type::BOOL, /* default */ "false",
             "Disable the possibility of private keys (only watch-only "
             "addresses are possible in this mode)."},
             {"blank", RPCArg::Type::BOOL, /* default */ "false",
              "Create a blank wallet. A blank wallet has no keys or HD seed. "
              "One can be set using sethdseed."},
             {"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
              "Encrypt the wallet with this passphrase."},
             {"avoid_reuse", RPCArg::Type::BOOL, /* default */ "false",
              "Keep track of coin reuse, and treat dirty and clean coins "
              "differently with privacy considerations in mind."},
         },
         RPCResult{"{\n"
                   "  \"name\" :    <wallet_name>,        (string) The wallet "
                   "name if created successfully. If the wallet was created "
                   "using a full path, the wallet_name will be the full path.\n"
                   "  \"warning\" : <warning>,            (string) Warning "
                   "message if wallet was not loaded cleanly.\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("createwallet", "\"testwallet\"") +
                     HelpExampleRpc("createwallet", "\"testwallet\"")},
     }
         .Check(request);
 
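    // Translate the boolean RPC arguments into wallet creation flags.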
     uint64_t flags = 0;
     if (!request.params[1].isNull() && request.params[1].get_bool()) {
         flags |= WALLET_FLAG_DISABLE_PRIVATE_KEYS;
     }
 
     if (!request.params[2].isNull() && request.params[2].get_bool()) {
         flags |= WALLET_FLAG_BLANK_WALLET;
     }
 
     SecureString passphrase;
     passphrase.reserve(100);
     std::vector<bilingual_str> warnings;
     if (!request.params[3].isNull()) {
         passphrase = request.params[3].get_str().c_str();
         if (passphrase.empty()) {
             // Empty string means unencrypted
             warnings.emplace_back(
                 Untranslated("Empty string given as passphrase, wallet will "
                              "not be encrypted."));
         }
     }
 
     if (!request.params[4].isNull() && request.params[4].get_bool()) {
         flags |= WALLET_FLAG_AVOID_REUSE;
     }
 
     bilingual_str error;
     std::shared_ptr<CWallet> wallet;
     WalletCreationStatus status =
         CreateWallet(config.GetChainParams(), *g_rpc_chain, passphrase, flags,
                      request.params[0].get_str(), error, warnings, wallet);
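    // Map the creation status onto the appropriate RPC error; on SUCCESS the
    // result object is returned below.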
     switch (status) {
         case WalletCreationStatus::CREATION_FAILED:
             throw JSONRPCError(RPC_WALLET_ERROR, error.original);
         case WalletCreationStatus::ENCRYPTION_FAILED:
             throw JSONRPCError(RPC_WALLET_ENCRYPTION_FAILED, error.original);
         case WalletCreationStatus::SUCCESS:
             break;
             // no default case, so the compiler can warn about missing cases
     }
 
     UniValue obj(UniValue::VOBJ);
     obj.pushKV("name", wallet->GetName());
     obj.pushKV("warning", Join(warnings, "\n", OpOriginal));
 
     return obj;
 }
 
 static UniValue unloadwallet(const Config &config,
                              const JSONRPCRequest &request) {
    RPCHelpMan{
        "unloadwallet",
        "Unloads the wallet referenced by the request endpoint, otherwise "
         "unloads the wallet specified in the argument.\n"
         "Specifying the wallet name on a wallet endpoint is invalid.",
         {
             {"wallet_name", RPCArg::Type::STR,
              /* default */ "the wallet name from the RPC request",
              "The name of the wallet to unload."},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("unloadwallet", "wallet_name") +
                     HelpExampleRpc("unloadwallet", "wallet_name")},
     }
         .Check(request);
 
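    // The wallet to unload comes from the request endpoint when present,
    // otherwise from the first parameter. Supplying both is rejected.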
     std::string wallet_name;
     if (GetWalletNameFromJSONRPCRequest(request, wallet_name)) {
         if (!request.params[0].isNull()) {
             throw JSONRPCError(RPC_INVALID_PARAMETER,
                                "Cannot unload the requested wallet");
         }
     } else {
         wallet_name = request.params[0].get_str();
     }
 
     std::shared_ptr<CWallet> wallet = GetWallet(wallet_name);
     if (!wallet) {
         throw JSONRPCError(RPC_WALLET_NOT_FOUND,
                            "Requested wallet does not exist or is not loaded");
     }
 
     // Release the "main" shared pointer and prevent further notifications.
     // Note that any attempt to load the same wallet would fail until the wallet
     // is destroyed (see CheckUniqueFileid).
     if (!RemoveWallet(wallet)) {
         throw JSONRPCError(RPC_MISC_ERROR, "Requested wallet already unloaded");
     }
 
     UnloadWallet(std::move(wallet));
 
     return NullUniValue;
 }
 
 static UniValue listunspent(const Config &config,
                             const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
    RPCHelpMan{
        "listunspent",
        "Returns an array of unspent transaction outputs\n"
         "with between minconf and maxconf (inclusive) confirmations.\n"
         "Optionally filter to only include txouts paid to specified "
         "addresses.\n",
         {
             {"minconf", RPCArg::Type::NUM, /* default */ "1",
              "The minimum confirmations to filter"},
             {"maxconf", RPCArg::Type::NUM, /* default */ "9999999",
              "The maximum confirmations to filter"},
             {
                 "addresses",
                 RPCArg::Type::ARR,
                 /* default */ "empty array",
                 "A json array of bitcoin addresses to filter",
                 {
                     {"address", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
                      "bitcoin address"},
                 },
             },
             {"include_unsafe", RPCArg::Type::BOOL, /* default */ "true",
              "Include outputs that are not safe to spend\n"
              "                  See description of \"safe\" attribute below."},
             {"query_options",
              RPCArg::Type::OBJ,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "JSON with query options",
              {
                  {"minimumAmount", RPCArg::Type::AMOUNT, /* default */ "0",
                   "Minimum value of each UTXO in " + CURRENCY_UNIT + ""},
                  {"maximumAmount", RPCArg::Type::AMOUNT,
                   /* default */ "unlimited",
                   "Maximum value of each UTXO in " + CURRENCY_UNIT + ""},
                  {"maximumCount", RPCArg::Type::NUM, /* default */ "unlimited",
                   "Maximum number of UTXOs"},
                  {"minimumSumAmount", RPCArg::Type::AMOUNT,
                   /* default */ "unlimited",
                   "Minimum sum value of all UTXOs in " + CURRENCY_UNIT + ""},
              },
              "query_options"},
         },
         RPCResult{
             "[                   (array of json object)\n"
             "  {\n"
             "    \"txid\" : \"txid\",        (string) the transaction id\n"
             "    \"vout\" : n,               (numeric) the vout value\n"
             "    \"address\" : \"address\",  (string) the bitcoin address\n"
             "    \"label\" : \"label\",      (string) The associated label, or "
             "\"\" for the default label\n"
             "    \"scriptPubKey\" : \"key\", (string) the script key\n"
             "    \"amount\" : x.xxx,         (numeric) the transaction output "
             "amount in " +
             CURRENCY_UNIT +
             "\n"
             "    \"confirmations\" : n,      (numeric) The number of "
             "confirmations\n"
             "    \"redeemScript\" : n        (string) The redeemScript if "
             "scriptPubKey is P2SH\n"
             "    \"spendable\" : xxx,        (bool) Whether we have the "
             "private keys to spend this output\n"
             "    \"solvable\" : xxx,         (bool) Whether we know how to "
             "spend this output, ignoring the lack of keys\n"
             "    \"reused\" : xxx,           (bool) (only present if "
             "avoid_reuse is set) Whether this output is reused/dirty (sent to "
             "an address that was previously spent from)\n"
             "    \"desc\" : xxx,             (string, only when solvable) A "
             "descriptor for spending this output\n"
             "    \"safe\" : xxx              (bool) Whether this output is "
             "considered safe to spend. Unconfirmed transactions\n"
             "                              from outside keys are considered "
             "unsafe and are not eligible for spending by\n"
             "                              fundrawtransaction and "
             "sendtoaddress.\n"
             "  }\n"
             "  ,...\n"
             "]\n"},
         RPCExamples{
             HelpExampleCli("listunspent", "") +
             HelpExampleCli("listunspent",
                            "6 9999999 "
                            "\"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\","
                            "\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"") +
             HelpExampleRpc("listunspent",
                            "6, 9999999 "
                            "\"[\\\"1PGFqEzfmQch1gKD3ra4k18PNj3tTUUSqg\\\","
                            "\\\"1LtvqCaApEdUGFkpKMM4MstjcaL4dKg8SP\\\"]\"") +
             HelpExampleCli(
                 "listunspent",
                 "6 9999999 '[]' true '{ \"minimumAmount\": 0.005 }'") +
             HelpExampleRpc(
                 "listunspent",
                 "6, 9999999, [] , true, { \"minimumAmount\": 0.005 } ")},
     }
         .Check(request);
 
     int nMinDepth = 1;
     if (!request.params[0].isNull()) {
         RPCTypeCheckArgument(request.params[0], UniValue::VNUM);
         nMinDepth = request.params[0].get_int();
     }
 
     int nMaxDepth = 9999999;
     if (!request.params[1].isNull()) {
         RPCTypeCheckArgument(request.params[1], UniValue::VNUM);
         nMaxDepth = request.params[1].get_int();
     }
 
     std::set<CTxDestination> destinations;
     if (!request.params[2].isNull()) {
         RPCTypeCheckArgument(request.params[2], UniValue::VARR);
         UniValue inputs = request.params[2].get_array();
         for (size_t idx = 0; idx < inputs.size(); idx++) {
             const UniValue &input = inputs[idx];
             CTxDestination dest =
                 DecodeDestination(input.get_str(), wallet->chainParams);
             if (!IsValidDestination(dest)) {
                 throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                    std::string("Invalid Bitcoin address: ") +
                                        input.get_str());
             }
             if (!destinations.insert(dest).second) {
                 throw JSONRPCError(
                     RPC_INVALID_PARAMETER,
                     std::string("Invalid parameter, duplicated address: ") +
                         input.get_str());
             }
         }
     }
 
     bool include_unsafe = true;
     if (!request.params[3].isNull()) {
         RPCTypeCheckArgument(request.params[3], UniValue::VBOOL);
         include_unsafe = request.params[3].get_bool();
     }
 
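    // Coin filter defaults; a maximum count of 0 means no limit.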
     Amount nMinimumAmount = Amount::zero();
     Amount nMaximumAmount = MAX_MONEY;
     Amount nMinimumSumAmount = MAX_MONEY;
     uint64_t nMaximumCount = 0;
 
     if (!request.params[4].isNull()) {
         const UniValue &options = request.params[4].get_obj();
 
         if (options.exists("minimumAmount")) {
             nMinimumAmount = AmountFromValue(options["minimumAmount"]);
         }
 
         if (options.exists("maximumAmount")) {
             nMaximumAmount = AmountFromValue(options["maximumAmount"]);
         }
 
         if (options.exists("minimumSumAmount")) {
             nMinimumSumAmount = AmountFromValue(options["minimumSumAmount"]);
         }
 
         if (options.exists("maximumCount")) {
             nMaximumCount = options["maximumCount"].get_int64();
         }
     }
 
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     UniValue results(UniValue::VARR);
     std::vector<COutput> vecOutputs;
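    // Gather the candidate outputs under cs_wallet. Reuse avoidance is not
    // applied here so that reused outputs are still listed and flagged below.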
     {
         CCoinControl cctl;
         cctl.m_avoid_address_reuse = false;
         cctl.m_min_depth = nMinDepth;
         cctl.m_max_depth = nMaxDepth;
         LOCK(pwallet->cs_wallet);
         pwallet->AvailableCoins(vecOutputs, !include_unsafe, &cctl,
                                 nMinimumAmount, nMaximumAmount,
                                 nMinimumSumAmount, nMaximumCount);
     }
 
     LOCK(pwallet->cs_wallet);
 
     const bool avoid_reuse = pwallet->IsWalletFlagSet(WALLET_FLAG_AVOID_REUSE);
 
     for (const COutput &out : vecOutputs) {
         CTxDestination address;
         const CScript &scriptPubKey = out.tx->tx->vout[out.i].scriptPubKey;
         bool fValidAddress = ExtractDestination(scriptPubKey, address);
         bool reused =
             avoid_reuse && pwallet->IsUsedDestination(out.tx->GetId(), out.i);
 
         if (destinations.size() &&
             (!fValidAddress || !destinations.count(address))) {
             continue;
         }
 
         UniValue entry(UniValue::VOBJ);
         entry.pushKV("txid", out.tx->GetId().GetHex());
         entry.pushKV("vout", out.i);
 
         if (fValidAddress) {
             entry.pushKV("address", EncodeDestination(address, config));
 
             const auto *address_book_entry =
                 pwallet->FindAddressBookEntry(address);
             if (address_book_entry) {
                 entry.pushKV("label", address_book_entry->GetLabel());
             }
 
             std::unique_ptr<SigningProvider> provider =
                 pwallet->GetSigningProvider(scriptPubKey);
             if (provider) {
                 if (scriptPubKey.IsPayToScriptHash()) {
                     const CScriptID &hash =
                         CScriptID(boost::get<ScriptHash>(address));
                     CScript redeemScript;
                     if (provider->GetCScript(hash, redeemScript)) {
                         entry.pushKV(
                             "redeemScript",
                             HexStr(redeemScript.begin(), redeemScript.end()));
                     }
                 }
             }
         }
 
         entry.pushKV("scriptPubKey",
                      HexStr(scriptPubKey.begin(), scriptPubKey.end()));
         entry.pushKV("amount", ValueFromAmount(out.tx->tx->vout[out.i].nValue));
         entry.pushKV("confirmations", out.nDepth);
         entry.pushKV("spendable", out.fSpendable);
         entry.pushKV("solvable", out.fSolvable);
         if (out.fSolvable) {
             std::unique_ptr<SigningProvider> provider =
                 pwallet->GetSigningProvider(scriptPubKey);
             if (provider) {
                 auto descriptor = InferDescriptor(scriptPubKey, *provider);
                 entry.pushKV("desc", descriptor->ToString());
             }
         }
         if (avoid_reuse) {
             entry.pushKV("reused", reused);
         }
         entry.pushKV("safe", out.fSafe);
         results.push_back(entry);
     }
 
     return results;
 }
 
 void FundTransaction(CWallet *const pwallet, CMutableTransaction &tx,
                      Amount &fee_out, int &change_position, UniValue options) {
     // Make sure the results are valid at least up to the most recent block
     // the user could have gotten from another RPC command prior to now
     pwallet->BlockUntilSyncedToCurrentChain();
 
     CCoinControl coinControl;
     change_position = -1;
     bool lockUnspents = false;
     UniValue subtractFeeFromOutputs;
     std::set<int> setSubtractFeeFromOutputs;
 
     if (!options.isNull()) {
         if (options.type() == UniValue::VBOOL) {
            // Backward compatibility: a bare bool is treated as includeWatching.
             coinControl.fAllowWatchOnly = options.get_bool();
         } else {
             RPCTypeCheckArgument(options, UniValue::VOBJ);
             RPCTypeCheckObj(
                 options,
                 {
                     {"changeAddress", UniValueType(UniValue::VSTR)},
                     {"changePosition", UniValueType(UniValue::VNUM)},
                     {"includeWatching", UniValueType(UniValue::VBOOL)},
                     {"lockUnspents", UniValueType(UniValue::VBOOL)},
                     // will be checked below
                     {"feeRate", UniValueType()},
                     {"subtractFeeFromOutputs", UniValueType(UniValue::VARR)},
                 },
                 true, true);
 
             if (options.exists("changeAddress")) {
                 CTxDestination dest = DecodeDestination(
                     options["changeAddress"].get_str(), pwallet->chainParams);
 
                 if (!IsValidDestination(dest)) {
                     throw JSONRPCError(
                         RPC_INVALID_ADDRESS_OR_KEY,
                         "changeAddress must be a valid bitcoin address");
                 }
 
                 coinControl.destChange = dest;
             }
 
             if (options.exists("changePosition")) {
                 change_position = options["changePosition"].get_int();
             }
 
             coinControl.fAllowWatchOnly =
                 ParseIncludeWatchonly(options["includeWatching"], *pwallet);
 
             if (options.exists("lockUnspents")) {
                 lockUnspents = options["lockUnspents"].get_bool();
             }
 
             if (options.exists("feeRate")) {
                 coinControl.m_feerate =
                     CFeeRate(AmountFromValue(options["feeRate"]));
                 coinControl.fOverrideFeeRate = true;
             }
 
             if (options.exists("subtractFeeFromOutputs")) {
                 subtractFeeFromOutputs =
                     options["subtractFeeFromOutputs"].get_array();
             }
         }
     } else {
        // options was omitted: use the default includeWatching behavior.
         coinControl.fAllowWatchOnly =
             ParseIncludeWatchonly(NullUniValue, *pwallet);
     }
 
    if (tx.vout.empty()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "TX must have at least one output");
     }
 
     if (change_position != -1 &&
         (change_position < 0 ||
          (unsigned int)change_position > tx.vout.size())) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "changePosition out of bounds");
     }
 
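    // Validate subtractFeeFromOutputs: reject duplicate, negative and
    // out-of-range positions.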
     for (size_t idx = 0; idx < subtractFeeFromOutputs.size(); idx++) {
         int pos = subtractFeeFromOutputs[idx].get_int();
         if (setSubtractFeeFromOutputs.count(pos)) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Invalid parameter, duplicated position: %d", pos));
         }
         if (pos < 0) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Invalid parameter, negative position: %d", pos));
         }
         if (pos >= int(tx.vout.size())) {
             throw JSONRPCError(
                 RPC_INVALID_PARAMETER,
                 strprintf("Invalid parameter, position too large: %d", pos));
         }
         setSubtractFeeFromOutputs.insert(pos);
     }
 
     bilingual_str error;
 
     if (!pwallet->FundTransaction(tx, fee_out, change_position, error,
                                   lockUnspents, setSubtractFeeFromOutputs,
                                   coinControl)) {
         throw JSONRPCError(RPC_WALLET_ERROR, error.original);
     }
 }
 
 static UniValue fundrawtransaction(const Config &config,
                                    const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "fundrawtransaction",
         "Add inputs to a transaction until it has enough in value to meet "
         "its out value.\n"
         "This will not modify existing inputs, and will add at most one change "
         "output to the outputs.\n"
         "No existing outputs will be modified unless "
         "\"subtractFeeFromOutputs\" is specified.\n"
         "Note that inputs which were signed may need to be resigned after "
         "completion since in/outputs have been added.\n"
         "The inputs added will not be signed, use signrawtransactionwithkey or "
         "signrawtransactionwithwallet for that.\n"
         "Note that all existing inputs must have their previous output "
         "transaction be in the wallet.\n"
         "Note that all inputs selected must be of standard form and P2SH "
         "scripts must be\n"
         "in the wallet using importaddress or addmultisigaddress (to calculate "
         "fees).\n"
         "You can see whether this is the case by checking the \"solvable\" "
         "field in the listunspent output.\n"
         "Only pay-to-pubkey, multisig, and P2SH versions thereof are currently "
         "supported for watch-only\n",
         {
             {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
              "The hex string of the raw transaction"},
             {"options",
              RPCArg::Type::OBJ,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "for backward compatibility: passing in a true instead of an "
              "object will result in {\"includeWatching\":true}",
              {
                  {"changeAddress", RPCArg::Type::STR,
                   /* default */ "pool address",
                   "The bitcoin address to receive the change"},
                  {"changePosition", RPCArg::Type::NUM, /* default */ "",
                   "The index of the change output"},
                  {"includeWatching", RPCArg::Type::BOOL,
                   /* default */ "true for watch-only wallets, otherwise false",
                   "Also select inputs which are watch only.\n"
                   "Only solvable inputs can be used. Watch-only destinations "
                   "are solvable if the public key and/or output script was "
                   "imported,\n"
                   "e.g. with 'importpubkey' or 'importmulti' with the "
                   "'pubkeys' or 'desc' field."},
                  {"lockUnspents", RPCArg::Type::BOOL, /* default */ "false",
                   "Lock selected unspent outputs"},
                  {"feeRate", RPCArg::Type::AMOUNT, /* default */
                   "not set: makes wallet determine the fee",
                   "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"},
                  {
                      "subtractFeeFromOutputs",
                      RPCArg::Type::ARR,
                      /* default */ "empty array",
                      "A json array of integers.\n"
                      "                              The fee will be equally "
                      "deducted from the amount of each specified output.\n"
                      "                              Those recipients will "
                      "receive less bitcoins than you enter in their "
                      "corresponding amount field.\n"
                      "                              If no outputs are "
                      "specified here, the sender pays the fee.",
                      {
                          {"vout_index", RPCArg::Type::NUM,
                           RPCArg::Optional::OMITTED,
                           "The zero-based output index, before a change output "
                           "is added."},
                      },
                  },
              },
              "options"},
         },
         RPCResult{"{\n"
                   "  \"hex\":       \"value\", (string)  The resulting raw "
                   "transaction (hex-encoded string)\n"
                   "  \"fee\":       n,         (numeric) Fee in " +
                   CURRENCY_UNIT +
                   " the resulting transaction pays\n"
                   "  \"changepos\": n          (numeric) The position of the "
                   "added change output, or -1\n"
                   "}\n"},
         RPCExamples{
             "\nCreate a transaction with no inputs\n" +
             HelpExampleCli("createrawtransaction",
                            "\"[]\" \"{\\\"myaddress\\\":0.01}\"") +
             "\nAdd sufficient unsigned inputs to meet the output value\n" +
             HelpExampleCli("fundrawtransaction", "\"rawtransactionhex\"") +
             "\nSign the transaction\n" +
             HelpExampleCli("signrawtransactionwithwallet",
                            "\"fundedtransactionhex\"") +
             "\nSend the transaction\n" +
             HelpExampleCli("sendrawtransaction", "\"signedtransactionhex\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VSTR, UniValueType()});
 
     // parse hex string from parameter
     CMutableTransaction tx;
     if (!DecodeHexTx(tx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     Amount fee;
     int change_position;
     FundTransaction(pwallet, tx, fee, change_position, request.params[1]);
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("hex", EncodeHexTx(CTransaction(tx)));
     result.pushKV("fee", ValueFromAmount(fee));
     result.pushKV("changepos", change_position);
 
     return result;
 }
 
 UniValue signrawtransactionwithwallet(const Config &config,
                                       const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "signrawtransactionwithwallet",
         "Sign inputs for raw transaction (serialized, hex-encoded).\n"
         "The second optional argument (may be null) is an array of previous "
         "transaction outputs that\n"
         "this transaction depends on but may not yet be in the block chain.\n" +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"hexstring", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The transaction hex string"},
             {
                 "prevtxs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::OMITTED_NAMED_ARG,
                 "A json array of previous dependent transaction outputs",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                             {"scriptPubKey", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "script key"},
                             {"redeemScript", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::OMITTED, "(required for P2SH)"},
                             {"amount", RPCArg::Type::AMOUNT,
                              RPCArg::Optional::NO, "The amount spent"},
                         },
                     },
                 },
             },
             {"sighashtype", RPCArg::Type::STR, /* default */ "ALL|FORKID",
              "The signature hash type. Must be one of\n"
              "       \"ALL|FORKID\"\n"
              "       \"NONE|FORKID\"\n"
              "       \"SINGLE|FORKID\"\n"
              "       \"ALL|FORKID|ANYONECANPAY\"\n"
              "       \"NONE|FORKID|ANYONECANPAY\"\n"
              "       \"SINGLE|FORKID|ANYONECANPAY\""},
         },
        RPCResult{"{\n"
                  "  \"hex\" : \"value\",          (string) The hex-encoded "
                  "raw transaction with signature(s)\n"
                  "  \"complete\" : true|false,    (boolean) If the "
                  "transaction has a complete set of signatures\n"
                  "  \"errors\" : [                (json array of objects) "
                  "Script verification errors (if there are any)\n"
                  "    {\n"
                  "      \"txid\" : \"hash\",        (string) The hash of the "
                  "referenced, previous transaction\n"
                  "      \"vout\" : n,               (numeric) The index of "
                  "the output being spent and used as input\n"
                   "      \"scriptSig\" : \"hex\",    (string) The hex-encoded "
                   "signature script\n"
                   "      \"sequence\" : n,           (numeric) Script sequence "
                   "number\n"
                   "      \"error\" : \"text\"        (string) Verification or "
                   "signing error related to the input\n"
                   "    }\n"
                   "    ,...\n"
                   "  ]\n"
                   "}\n"},
         RPCExamples{
             HelpExampleCli("signrawtransactionwithwallet", "\"myhex\"") +
             HelpExampleRpc("signrawtransactionwithwallet", "\"myhex\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params,
                  {UniValue::VSTR, UniValue::VARR, UniValue::VSTR}, true);
 
     CMutableTransaction mtx;
     if (!DecodeHexTx(mtx, request.params[0].get_str())) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
     }
 
     // Sign the transaction
     LOCK(pwallet->cs_wallet);
     EnsureWalletIsUnlocked(pwallet);
 
     // Fetch previous transactions (inputs):
     std::map<COutPoint, Coin> coins;
     for (const CTxIn &txin : mtx.vin) {
         // Create empty map entry keyed by prevout.
         coins[txin.prevout];
     }
     pwallet->chain().findCoins(coins);
 
     // Parse the prevtxs array
     ParsePrevouts(request.params[1], nullptr, coins);
 
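    // Collect a signing provider for each input script. Signing is attempted
    // once per provider below so every spendable input gets a chance to be
    // signed.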
     std::set<std::shared_ptr<SigningProvider>> providers;
    for (const auto &coin_pair : coins) {
         std::unique_ptr<SigningProvider> provider = pwallet->GetSigningProvider(
             coin_pair.second.GetTxOut().scriptPubKey);
         if (provider) {
             providers.insert(std::move(provider));
         }
     }
     if (providers.size() == 0) {
         // When there are no available providers, use a dummy SigningProvider so
         // we can check if the tx is complete
         providers.insert(std::make_shared<SigningProvider>());
     }
 
     UniValue result(UniValue::VOBJ);
     for (std::shared_ptr<SigningProvider> provider : providers) {
         SignTransaction(mtx, provider.get(), coins, request.params[2], result);
     }
     return result;
 }
 
 UniValue rescanblockchain(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "rescanblockchain",
         "Rescan the local blockchain for wallet related transactions.\n"
         "Note: Use \"getwalletinfo\" to query the scanning progress.\n",
         {
             {"start_height", RPCArg::Type::NUM, /* default */ "0",
              "block height where the rescan should start"},
             {"stop_height", RPCArg::Type::NUM,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "the last block height that should be scanned"},
         },
        RPCResult{"{\n"
                  "  \"start_height\"     (numeric) The block height where the "
                  "rescan started (the requested height or 0)\n"
                   "  \"stop_height\"      (numeric) The height of the last "
                   "rescanned block. May be null in rare cases if there was a "
                   "reorg and the call didn't scan any blocks because they were "
                   "already scanned in the background.\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("rescanblockchain", "100000 120000") +
                     HelpExampleRpc("rescanblockchain", "100000, 120000")},
     }
         .Check(request);
 
     WalletRescanReserver reserver(*pwallet);
     if (!reserver.reserve()) {
         throw JSONRPCError(
             RPC_WALLET_ERROR,
             "Wallet is currently rescanning. Abort existing rescan or wait.");
     }
 
     int start_height = 0;
     Optional<int> stop_height;
     BlockHash start_block;
     {
         LOCK(pwallet->cs_wallet);
         int tip_height = pwallet->GetLastBlockHeight();
 
         if (!request.params[0].isNull()) {
             start_height = request.params[0].get_int();
             if (start_height < 0 || start_height > tip_height) {
                 throw JSONRPCError(RPC_INVALID_PARAMETER,
                                    "Invalid start_height");
             }
         }
 
         if (!request.params[1].isNull()) {
             stop_height = request.params[1].get_int();
             if (*stop_height < 0 || *stop_height > tip_height) {
                 throw JSONRPCError(RPC_INVALID_PARAMETER,
                                    "Invalid stop_height");
             } else if (*stop_height < start_height) {
                 throw JSONRPCError(
                    RPC_INVALID_PARAMETER,
                    "stop_height must be greater than or equal to "
                    "start_height");
             }
         }
 
        // We can't rescan beyond non-pruned blocks; stop and throw an error.
         if (!pwallet->chain().hasBlocks(pwallet->GetLastBlockHash(),
                                         start_height, stop_height)) {
             throw JSONRPCError(
                 RPC_MISC_ERROR,
                 "Can't rescan beyond pruned data. Use RPC call "
                 "getblockchaininfo to determine your pruned height.");
         }
 
         CHECK_NONFATAL(pwallet->chain().findAncestorByHeight(
             pwallet->GetLastBlockHash(), start_height,
             FoundBlock().hash(start_block)));
     }
 
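    // Pass fUpdate=true so transactions already in the wallet are refreshed
    // with any data found during the rescan.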
     CWallet::ScanResult result = pwallet->ScanForWalletTransactions(
         start_block, start_height, stop_height, reserver, true /* fUpdate */);
     switch (result.status) {
         case CWallet::ScanResult::SUCCESS:
             break;
         case CWallet::ScanResult::FAILURE:
             throw JSONRPCError(
                 RPC_MISC_ERROR,
                 "Rescan failed. Potentially corrupted data files.");
         case CWallet::ScanResult::USER_ABORT:
             throw JSONRPCError(RPC_MISC_ERROR, "Rescan aborted.");
             // no default case, so the compiler can warn about missing cases
     }
     UniValue response(UniValue::VOBJ);
     response.pushKV("start_height", start_height);
     response.pushKV("stop_height", result.last_scanned_height
                                        ? *result.last_scanned_height
                                        : UniValue());
     return response;
 }
 
 class DescribeWalletAddressVisitor : public boost::static_visitor<UniValue> {
 public:
     const SigningProvider *const provider;
 
     void ProcessSubScript(const CScript &subscript, UniValue &obj) const {
         // Always present: script type and redeemscript
         std::vector<std::vector<uint8_t>> solutions_data;
         txnouttype which_type = Solver(subscript, solutions_data);
         obj.pushKV("script", GetTxnOutputType(which_type));
         obj.pushKV("hex", HexStr(subscript.begin(), subscript.end()));
 
         CTxDestination embedded;
         if (ExtractDestination(subscript, embedded)) {
             // Only when the script corresponds to an address.
             UniValue subobj(UniValue::VOBJ);
             UniValue detail = DescribeAddress(embedded);
             subobj.pushKVs(detail);
             UniValue wallet_detail = boost::apply_visitor(*this, embedded);
             subobj.pushKVs(wallet_detail);
             subobj.pushKV("address", EncodeDestination(embedded, GetConfig()));
             subobj.pushKV("scriptPubKey",
                           HexStr(subscript.begin(), subscript.end()));
             // Always report the pubkey at the top level, so that
             // `getnewaddress()['pubkey']` always works.
             if (subobj.exists("pubkey")) {
                 obj.pushKV("pubkey", subobj["pubkey"]);
             }
             obj.pushKV("embedded", std::move(subobj));
         } else if (which_type == TX_MULTISIG) {
             // Also report some information on multisig scripts (which do not
             // have a corresponding address).
             // TODO: abstract out the common functionality between this logic
             // and ExtractDestinations.
             obj.pushKV("sigsrequired", solutions_data[0][0]);
             UniValue pubkeys(UniValue::VARR);
             for (size_t i = 1; i < solutions_data.size() - 1; ++i) {
                 CPubKey key(solutions_data[i].begin(), solutions_data[i].end());
                 pubkeys.push_back(HexStr(key.begin(), key.end()));
             }
             obj.pushKV("pubkeys", std::move(pubkeys));
         }
     }
 
     explicit DescribeWalletAddressVisitor(const SigningProvider *_provider)
         : provider(_provider) {}
 
     UniValue operator()(const CNoDestination &dest) const {
         return UniValue(UniValue::VOBJ);
     }
 
     UniValue operator()(const PKHash &pkhash) const {
         CKeyID keyID(pkhash);
         UniValue obj(UniValue::VOBJ);
         CPubKey vchPubKey;
         if (provider && provider->GetPubKey(keyID, vchPubKey)) {
             obj.pushKV("pubkey", HexStr(vchPubKey));
             obj.pushKV("iscompressed", vchPubKey.IsCompressed());
         }
         return obj;
     }
 
     UniValue operator()(const ScriptHash &scripthash) const {
         CScriptID scriptID(scripthash);
         UniValue obj(UniValue::VOBJ);
         CScript subscript;
         if (provider && provider->GetCScript(scriptID, subscript)) {
             ProcessSubScript(subscript, obj);
         }
         return obj;
     }
 };
 
 static UniValue DescribeWalletAddress(CWallet *pwallet,
                                       const CTxDestination &dest) {
     UniValue ret(UniValue::VOBJ);
     UniValue detail = DescribeAddress(dest);
     CScript script = GetScriptForDestination(dest);
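    // Without a wallet there is no signing provider, in which case the
    // visitor omits key and script details.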
     std::unique_ptr<SigningProvider> provider = nullptr;
     if (pwallet) {
         provider = pwallet->GetSigningProvider(script);
     }
     ret.pushKVs(detail);
     ret.pushKVs(boost::apply_visitor(
         DescribeWalletAddressVisitor(provider.get()), dest));
     return ret;
 }
 
 /** Convert CAddressBookData to JSON record.  */
 static UniValue AddressBookDataToJSON(const CAddressBookData &data,
                                       const bool verbose) {
     UniValue ret(UniValue::VOBJ);
     if (verbose) {
         ret.pushKV("name", data.GetLabel());
     }
     ret.pushKV("purpose", data.purpose);
     return ret;
 }
 
 UniValue getaddressinfo(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     const std::string example_address =
         "\"qrmzys48glkpevp2l4t24jtcltc9hyzx9cep2qffm4\"";
 
     RPCHelpMan{
         "getaddressinfo",
         "\nReturn information about the given bitcoin address.\n"
         "Some of the information will only be present if the address is in the "
         "active wallet.\n",
         {
             {"address", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The bitcoin address for which to get information."},
         },
         RPCResult{
             "{\n"
             "  \"address\" : \"address\",              (string) The bitcoin "
             "address validated.\n"
             "  \"scriptPubKey\" : \"hex\",             (string) The "
             "hex-encoded scriptPubKey generated by the address.\n"
             "  \"ismine\" : true|false,              (boolean) If the address "
             "is yours.\n"
             "  \"iswatchonly\" : true|false,         (boolean) If the address "
             "is watchonly.\n"
             "  \"solvable\" : true|false,            (boolean) If we know how "
             "to spend coins sent to this address, ignoring the possible lack "
             "of private keys.\n"
             "  \"desc\" : \"desc\",                    (string, optional) A "
             "descriptor for spending coins sent to this address (only when "
             "solvable).\n"
             "  \"isscript\" : true|false,            (boolean) If the key is a "
             "script.\n"
             "  \"ischange\" : true|false,            (boolean) If the address "
             "was used for change output.\n"
             "  \"script\" : \"type\"                   (string, optional) The "
             "output script type. Only if isscript is true and the redeemscript "
             "is known. Possible\n"
             "                                                         types: "
             "nonstandard, pubkey, pubkeyhash, scripthash, multisig, nulldata\n"
             "  \"hex\" : \"hex\",                      (string, optional) The "
             "redeemscript for the p2sh address.\n"
             "  \"pubkeys\"                           (array, optional) Array "
             "of pubkeys associated with the known redeemscript (only if script "
             "is multisig).\n"
             "    [\n"
             "      \"pubkey\" (string)\n"
             "      ,...\n"
             "    ]\n"
             "  \"sigsrequired\" : xxxxx              (numeric, optional) The "
             "number of signatures required to spend multisig output (only if "
             "script is multisig).\n"
             "  \"pubkey\" : \"publickeyhex\",          (string, optional) The "
             "hex value of the raw public key for single-key addresses "
             "(possibly embedded in P2SH).\n"
             "  \"embedded\" : {...},                 (object, optional) "
             "Information about the address embedded in P2SH, if "
             "relevant and known. Includes all\n"
             "                                                         "
             "getaddressinfo output fields for the embedded address, excluding "
             "metadata (timestamp, hdkeypath,\n"
             "                                                         "
             "hdseedid) and relation to the wallet (ismine, iswatchonly).\n"
             "  \"iscompressed\" : true|false,        (boolean, optional) If "
             "the pubkey is compressed.\n"
             "  \"label\" :  \"label\"                  (string) The label "
             "associated with the address. Defaults to \"\". Equivalent to the "
             "label name in the labels array below.\n"
             "  \"timestamp\" : timestamp,            (number, optional) The "
             "creation time of the key, if available, expressed in " +
             UNIX_EPOCH_TIME +
             ".\n"
             "  \"hdkeypath\" : \"keypath\"             (string, optional) The "
             "HD keypath, if the key is HD and available.\n"
             "  \"hdseedid\" : \"<hash160>\"            (string, optional) The "
             "Hash160 of the HD seed.\n"
             "  \"hdmasterfingerprint\" : \"<hash160>\" (string, optional) The "
             "fingerprint of the master key.\n"
             "  \"labels\"                            (json object) An array of "
             "labels associated with the address. Currently limited to one "
             "label but returned\n"
             "                                               as an array to "
             "keep the API stable if multiple labels are enabled in the "
             "future.\n"
             "    [\n"
             "      \"label name\" (string) The label name. Defaults to \"\". "
             "Equivalent to the label field above.\n\n"
             "      DEPRECATED, will be removed in a future version. To "
             "re-enable, launch bitcoind with `-deprecatedrpc=labelspurpose`:\n"
             "      { (json object of label data)\n"
             "        \"name\" : \"label name\" (string) The label name. "
             "Defaults to \"\". Equivalent to the label field above.\n"
             "        \"purpose\" : \"purpose\" (string) The purpose of the "
             "associated address (send or receive).\n"
             "      },...\n"
             "    ]\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getaddressinfo", example_address) +
                     HelpExampleRpc("getaddressinfo", example_address)},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     UniValue ret(UniValue::VOBJ);
     CTxDestination dest =
         DecodeDestination(request.params[0].get_str(), wallet->chainParams);
     // Make sure the destination is valid
     if (!IsValidDestination(dest)) {
         throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address");
     }
 
     std::string currentAddress = EncodeDestination(dest, config);
     ret.pushKV("address", currentAddress);
 
     CScript scriptPubKey = GetScriptForDestination(dest);
     ret.pushKV("scriptPubKey",
                HexStr(scriptPubKey.begin(), scriptPubKey.end()));
 
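    // A signing provider is only available for scripts known to the wallet;
    // the solvability and descriptor fields below depend on it.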
     std::unique_ptr<SigningProvider> provider =
         pwallet->GetSigningProvider(scriptPubKey);
 
     isminetype mine = pwallet->IsMine(dest);
     ret.pushKV("ismine", bool(mine & ISMINE_SPENDABLE));
 
     bool solvable = provider && IsSolvable(*provider, scriptPubKey);
     ret.pushKV("solvable", solvable);
 
     if (solvable) {
         ret.pushKV("desc",
                    InferDescriptor(scriptPubKey, *provider)->ToString());
     }
 
     ret.pushKV("iswatchonly", bool(mine & ISMINE_WATCH_ONLY));
 
     UniValue detail = DescribeWalletAddress(pwallet, dest);
     ret.pushKVs(detail);
 
     // Return label field if existing. Currently only one label can be
     // associated with an address, so the label should be equivalent to the
     // value of the name key/value pair in the labels array below.
     const auto *address_book_entry = pwallet->FindAddressBookEntry(dest);
     if (address_book_entry) {
         ret.pushKV("label", address_book_entry->GetLabel());
     }
 
     ret.pushKV("ischange", pwallet->IsChange(scriptPubKey));
 
     ScriptPubKeyMan *spk_man = pwallet->GetScriptPubKeyMan(scriptPubKey);
     if (spk_man) {
         if (const CKeyMetadata *meta = spk_man->GetMetadata(dest)) {
             ret.pushKV("timestamp", meta->nCreateTime);
             if (meta->has_key_origin) {
                 ret.pushKV("hdkeypath", WriteHDKeypath(meta->key_origin.path));
                 ret.pushKV("hdseedid", meta->hd_seed_id.GetHex());
                 ret.pushKV("hdmasterfingerprint",
                            HexStr(meta->key_origin.fingerprint,
                                   meta->key_origin.fingerprint + 4));
             }
         }
     }
 
     // Return a `labels` array containing the label associated with the address,
     // equivalent to the `label` field above. Currently only one label can be
     // associated with an address, but we return an array so the API remains
     // stable if we allow multiple labels to be associated with an address in
     // the future.
     //
     // DEPRECATED: The previous behavior of returning an array containing a JSON
     // object of `name` and `purpose` key/value pairs has been deprecated.
     UniValue labels(UniValue::VARR);
     if (address_book_entry) {
         if (pwallet->chain().rpcEnableDeprecated("labelspurpose")) {
             labels.push_back(AddressBookDataToJSON(*address_book_entry, true));
         } else {
             labels.push_back(address_book_entry->GetLabel());
         }
     }
     ret.pushKV("labels", std::move(labels));
 
     return ret;
 }
 
 UniValue getaddressesbylabel(const Config &config,
                              const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "getaddressesbylabel",
         "Returns the list of addresses assigned the specified label.\n",
         {
             {"label", RPCArg::Type::STR, RPCArg::Optional::NO, "The label."},
         },
         RPCResult{
             "{ (json object with addresses as keys)\n"
             "  \"address\": { (json object with information about address)\n"
             "    \"purpose\": \"string\" (string)  Purpose of address "
             "(\"send\" for sending address, \"receive\" for receiving "
             "address)\n"
             "  },...\n"
             "}\n"},
         RPCExamples{HelpExampleCli("getaddressesbylabel", "\"tabby\"") +
                     HelpExampleRpc("getaddressesbylabel", "\"tabby\"")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     std::string label = LabelFromValue(request.params[0]);
 
     // Find all addresses that have the given label
     UniValue ret(UniValue::VOBJ);
     std::set<std::string> addresses;
     for (const std::pair<const CTxDestination, CAddressBookData> &item :
          pwallet->m_address_book) {
         if (item.second.IsChange()) {
             continue;
         }
         if (item.second.GetLabel() == label) {
             std::string address = EncodeDestination(item.first, config);
             // CWallet::m_address_book is not expected to contain duplicate
             // address strings, but build a separate set as a precaution just in
             // case it does.
             bool unique = addresses.emplace(address).second;
             CHECK_NONFATAL(unique);
             // UniValue::pushKV checks whether the key already exists in O(N).
             // Since duplicate addresses are unexpected (already checked via
             // the std::set in O(log(N))), UniValue::__pushKV, which is
             // currently O(1), is used instead.
             ret.__pushKV(address, AddressBookDataToJSON(item.second, false));
         }
     }
 
     if (ret.empty()) {
         throw JSONRPCError(RPC_WALLET_INVALID_LABEL_NAME,
                            std::string("No addresses with label " + label));
     }
 
     return ret;
 }
 
 UniValue listlabels(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "listlabels",
         "Returns the list of all labels, or labels that are assigned to "
         "addresses with a specific purpose.\n",
         {
             {"purpose", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG,
              "Address purpose to list labels for ('send','receive'). An empty "
              "string is the same as not providing this argument."},
         },
         RPCResult{"[               (json array of string)\n"
                   "  \"label\",      (string) Label name\n"
                   "  ...\n"
                   "]\n"},
         RPCExamples{"\nList all labels\n" + HelpExampleCli("listlabels", "") +
                     "\nList labels that have receiving addresses\n" +
                     HelpExampleCli("listlabels", "receive") +
                     "\nList labels that have sending addresses\n" +
                     HelpExampleCli("listlabels", "send") +
                     "\nAs a JSON-RPC call\n" +
                     HelpExampleRpc("listlabels", "receive")},
     }
         .Check(request);
 
     LOCK(pwallet->cs_wallet);
 
     std::string purpose;
     if (!request.params[0].isNull()) {
         purpose = request.params[0].get_str();
     }
 
     // Add to a set to sort by label name, then insert into a UniValue array
     std::set<std::string> label_set;
     for (const std::pair<const CTxDestination, CAddressBookData> &entry :
          pwallet->m_address_book) {
         if (entry.second.IsChange()) {
             continue;
         }
         if (purpose.empty() || entry.second.purpose == purpose) {
             label_set.insert(entry.second.GetLabel());
         }
     }
 
     UniValue ret(UniValue::VARR);
     for (const std::string &name : label_set) {
         ret.push_back(name);
     }
 
     return ret;
 }
 
 static UniValue sethdseed(const Config &config, const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "sethdseed",
         "Set or generate a new HD wallet seed. Non-HD wallets will not be "
         "upgraded to being an HD wallet. Wallets that are already\n"
         "HD will have a new HD seed set so that new keys added to the keypool "
         "will be derived from this new seed.\n"
         "\nNote that you will need to MAKE A NEW BACKUP of your wallet after "
         "setting the HD wallet seed.\n" +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"newkeypool", RPCArg::Type::BOOL, /* default */ "true",
              "Whether to flush old unused addresses, including change "
              "addresses, from the keypool and regenerate it.\n"
              "                             If true, the next address from "
              "getnewaddress and change address from getrawchangeaddress will "
              "be from this new seed.\n"
              "                             If false, addresses (including "
              "change addresses if the wallet already had HD Chain Split "
              "enabled) from the existing\n"
              "                             keypool will be used until it has "
              "been depleted."},
             {"seed", RPCArg::Type::STR, /* default */ "random seed",
              "The WIF private key to use as the new HD seed.\n"
              "                             The seed value can be retrieved "
              "using the dumpwallet command. It is the private key marked "
              "hdseed=1"},
         },
         RPCResults{},
         RPCExamples{HelpExampleCli("sethdseed", "") +
                     HelpExampleCli("sethdseed", "false") +
                     HelpExampleCli("sethdseed", "true \"wifkey\"") +
                     HelpExampleRpc("sethdseed", "true, \"wifkey\"")},
     }
         .Check(request);
 
     LegacyScriptPubKeyMan &spk_man =
         EnsureLegacyScriptPubKeyMan(*pwallet, true);
 
     if (pwallet->chain().isInitialBlockDownload()) {
         throw JSONRPCError(
             RPC_CLIENT_IN_INITIAL_DOWNLOAD,
             "Cannot set a new HD seed while still in Initial Block Download");
     }
 
     if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) {
         throw JSONRPCError(
             RPC_WALLET_ERROR,
             "Cannot set an HD seed to a wallet with private keys disabled");
     }
 
     LOCK2(pwallet->cs_wallet, spk_man.cs_KeyStore);
 
     // Do not do anything to non-HD wallets
     if (!pwallet->CanSupportFeature(FEATURE_HD)) {
         throw JSONRPCError(
             RPC_WALLET_ERROR,
             "Cannot set an HD seed on a non-HD wallet. Use the upgradewallet "
             "RPC in order to upgrade a non-HD wallet to HD");
     }
 
     EnsureWalletIsUnlocked(pwallet);
 
     bool flush_key_pool = true;
     if (!request.params[0].isNull()) {
         flush_key_pool = request.params[0].get_bool();
     }
 
     CPubKey master_pub_key;
     if (request.params[1].isNull()) {
         master_pub_key = spk_man.GenerateNewSeed();
     } else {
         CKey key = DecodeSecret(request.params[1].get_str());
         if (!key.IsValid()) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Invalid private key");
         }
 
         if (HaveKey(spk_man, key)) {
             throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
                                "Already have this key (either as an HD seed or "
                                "as a loose private key)");
         }
 
         master_pub_key = spk_man.DeriveNewSeed(key);
     }
 
     spk_man.SetHDSeed(master_pub_key);
     if (flush_key_pool) {
         spk_man.NewKeyPool();
     }
 
     return NullUniValue;
 }
 
 static UniValue walletprocesspsbt(const Config &config,
                                   const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "walletprocesspsbt",
         "Update a PSBT with input information from our wallet and then sign "
         "inputs that we can sign for." +
             HelpRequiringPassphrase(pwallet) + "\n",
         {
             {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO,
              "The transaction base64 string"},
             {"sign", RPCArg::Type::BOOL, /* default */ "true",
              "Also sign the transaction when updating"},
             {"sighashtype", RPCArg::Type::STR, /* default */ "ALL|FORKID",
              "The signature hash type to sign with if not specified by "
              "the PSBT. Must be one of\n"
              "       \"ALL|FORKID\"\n"
              "       \"NONE|FORKID\"\n"
              "       \"SINGLE|FORKID\"\n"
              "       \"ALL|FORKID|ANYONECANPAY\"\n"
              "       \"NONE|FORKID|ANYONECANPAY\"\n"
              "       \"SINGLE|FORKID|ANYONECANPAY\""},
             {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false",
              "If true, includes the BIP 32 derivation paths for public keys if "
              "we know them"},
         },
         RPCResult{"{\n"
                   "  \"psbt\" : \"value\",          (string) The "
                   "base64-encoded partially signed transaction\n"
                   "  \"complete\" : true|false,   (boolean) If the transaction "
                   "has a complete set of signatures\n"
                   "}\n"},
         RPCExamples{HelpExampleCli("walletprocesspsbt", "\"psbt\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params,
                  {UniValue::VSTR, UniValue::VBOOL, UniValue::VSTR});
 
     // Unserialize the transaction
     PartiallySignedTransaction psbtx;
     std::string error;
     if (!DecodeBase64PSBT(psbtx, request.params[0].get_str(), error)) {
         throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
                            strprintf("TX decode failed %s", error));
     }
 
     // Get the sighash type
     SigHashType nHashType = ParseSighashString(request.params[2]);
     if (!nHashType.hasForkId()) {
         throw JSONRPCError(RPC_INVALID_PARAMETER,
                            "Signature must use SIGHASH_FORKID");
     }
 
     // Fill transaction with our data and also sign
     bool sign =
         request.params[1].isNull() ? true : request.params[1].get_bool();
     bool bip32derivs =
         request.params[3].isNull() ? false : request.params[3].get_bool();
     bool complete = true;
     const TransactionError err =
         FillPSBT(pwallet, psbtx, complete, nHashType, sign, bip32derivs);
     if (err != TransactionError::OK) {
         throw JSONRPCTransactionError(err);
     }
 
     UniValue result(UniValue::VOBJ);
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << psbtx;
     result.pushKV("psbt", EncodeBase64(ssTx.str()));
     result.pushKV("complete", complete);
 
     return result;
 }
 
 static UniValue walletcreatefundedpsbt(const Config &config,
                                        const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{
         "walletcreatefundedpsbt",
         "Creates and funds a transaction in the Partially Signed Transaction "
         "format. Inputs will be added if supplied inputs are not enough.\n"
         "Implements the Creator and Updater roles.\n",
         {
             {
                 "inputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "A json array of json objects",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"txid", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO, "The transaction id"},
                             {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
                              "The output number"},
                             {"sequence", RPCArg::Type::NUM,
                              RPCArg::Optional::NO, "The sequence number"},
                         },
                     },
                 },
             },
             {
                 "outputs",
                 RPCArg::Type::ARR,
                 RPCArg::Optional::NO,
                 "a json array with outputs (key-value pairs), where none of "
                 "the keys are duplicated.\n"
                 "That is, each address can only appear once and there can only "
                 "be one 'data' object.\n"
                 "For compatibility reasons, a dictionary, which holds the "
                 "key-value pairs directly, is also\n"
                 "                             accepted as second parameter.",
                 {
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"address", RPCArg::Type::AMOUNT,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key (string) is the "
                              "bitcoin address, the value (float or string) is "
                              "the amount in " +
                                  CURRENCY_UNIT + ""},
                         },
                     },
                     {
                         "",
                         RPCArg::Type::OBJ,
                         RPCArg::Optional::OMITTED,
                         "",
                         {
                             {"data", RPCArg::Type::STR_HEX,
                              RPCArg::Optional::NO,
                              "A key-value pair. The key must be \"data\", the "
                              "value is hex-encoded data"},
                         },
                     },
                 },
             },
             {"locktime", RPCArg::Type::NUM, /* default */ "0",
              "Raw locktime. Non-0 value also locktime-activates inputs."},
             {"options",
              RPCArg::Type::OBJ,
              RPCArg::Optional::OMITTED_NAMED_ARG,
              "",
              {
                  {"changeAddress", RPCArg::Type::STR_HEX,
                   /* default */ "pool address",
                   "The bitcoin address to receive the change"},
                  {"changePosition", RPCArg::Type::NUM,
                   /* default */ "random", "The index of the change output"},
                  {"includeWatching", RPCArg::Type::BOOL,
                   /* default */ "true for watch-only wallets, otherwise false",
                   "Also select inputs which are watch only"},
                  {"lockUnspents", RPCArg::Type::BOOL, /* default */ "false",
                   "Lock selected unspent outputs"},
                  {"feeRate", RPCArg::Type::AMOUNT, /* default */
                   "not set: makes wallet determine the fee",
                   "Set a specific fee rate in " + CURRENCY_UNIT + "/kB"},
                  {
                      "subtractFeeFromOutputs",
                      RPCArg::Type::ARR,
                      /* default */ "empty array",
                      "A json array of integers.\n"
                      "                              The fee will be equally "
                      "deducted from the amount of each specified output.\n"
                      "                              Those recipients will "
                      "receive less bitcoins than you enter in their "
                      "corresponding amount field.\n"
                      "                              If no outputs are "
                      "specified here, the sender pays the fee.",
                      {
                          {"vout_index", RPCArg::Type::NUM,
                           RPCArg::Optional::OMITTED,
                           "The zero-based output index, before a change output "
                           "is added."},
                      },
                  },
              },
              "options"},
             {"bip32derivs", RPCArg::Type::BOOL, /* default */ "false",
              "If true, includes the BIP 32 derivation paths for public keys if "
              "we know them"},
         },
         RPCResult{"{\n"
                   "  \"psbt\": \"value\",        (string)  The resulting raw "
                   "transaction (base64-encoded string)\n"
                   "  \"fee\":       n,         (numeric) Fee in " +
                   CURRENCY_UNIT +
                   " the resulting transaction pays\n"
                   "  \"changepos\": n          (numeric) The position of the "
                   "added change output, or -1\n"
                   "}\n"},
         RPCExamples{
             "\nCreate a transaction with an input and a data output\n" +
             HelpExampleCli("walletcreatefundedpsbt",
                            "\"[{\\\"txid\\\":\\\"myid\\\",\\\"vout\\\":0}]\" "
                            "\"[{\\\"data\\\":\\\"00010203\\\"}]\"")},
     }
         .Check(request);
 
     RPCTypeCheck(request.params,
                  {UniValue::VARR,
                   UniValueType(), // ARR or OBJ, checked later
                   UniValue::VNUM, UniValue::VOBJ},
                  true);
 
     Amount fee;
     int change_position;
     CMutableTransaction rawTx =
         ConstructTransaction(wallet->chainParams, request.params[0],
                              request.params[1], request.params[2]);
     FundTransaction(pwallet, rawTx, fee, change_position, request.params[3]);
 
     // Make a blank psbt
     PartiallySignedTransaction psbtx(rawTx);
 
     // Fill transaction with our data but don't sign
     bool bip32derivs =
         request.params[4].isNull() ? false : request.params[4].get_bool();
     bool complete = true;
     const TransactionError err =
         FillPSBT(pwallet, psbtx, complete, SigHashType().withForkId(), false,
                  bip32derivs);
     if (err != TransactionError::OK) {
         throw JSONRPCTransactionError(err);
     }
 
     // Serialize the PSBT
     CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
     ssTx << psbtx;
 
     UniValue result(UniValue::VOBJ);
     result.pushKV("psbt", EncodeBase64(ssTx.str()));
     result.pushKV("fee", ValueFromAmount(fee));
     result.pushKV("changepos", change_position);
     return result;
 }
 
 static UniValue upgradewallet(const Config &config,
                               const JSONRPCRequest &request) {
     std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
     CWallet *const pwallet = wallet.get();
 
     if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
         return NullUniValue;
     }
 
     RPCHelpMan{"upgradewallet",
                "\nUpgrade the wallet. Upgrades to the latest version if no "
                "version number is specified.\n"
                "New keys may be generated and a new wallet backup will need to "
                "be made.",
                {{"version", RPCArg::Type::NUM,
                  /* default */ strprintf("%d", FEATURE_LATEST),
                  "The version number to upgrade to. Default is the latest "
                  "wallet version"}},
                RPCResults{},
                RPCExamples{HelpExampleCli("upgradewallet", "200300") +
                            HelpExampleRpc("upgradewallet", "200300")}}
         .Check(request);
 
     RPCTypeCheck(request.params, {UniValue::VNUM}, true);
 
     EnsureWalletIsUnlocked(pwallet);
 
     int version = 0;
     if (!request.params[0].isNull()) {
         version = request.params[0].get_int();
     }
 
     bilingual_str error;
     std::vector<bilingual_str> warnings;
     if (!pwallet->UpgradeWallet(version, error, warnings)) {
         throw JSONRPCError(RPC_WALLET_ERROR, error.original);
     }
     return error.original;
 }
 
 // clang-format off
 static const CRPCCommand commands[] = {
     //  category            name                            actor (function)              argNames
     //  ------------------- ------------------------        ----------------------        ----------
     { "rawtransactions",    "fundrawtransaction",           fundrawtransaction,           {"hexstring","options"} },
     { "wallet",             "abandontransaction",           abandontransaction,           {"txid"} },
     { "wallet",             "addmultisigaddress",           addmultisigaddress,           {"nrequired","keys","label"} },
     { "wallet",             "backupwallet",                 backupwallet,                 {"destination"} },
     { "wallet",             "createwallet",                 createwallet,                 {"wallet_name", "disable_private_keys", "blank", "passphrase", "avoid_reuse"} },
     { "wallet",             "encryptwallet",                encryptwallet,                {"passphrase"} },
     { "wallet",             "getaddressesbylabel",          getaddressesbylabel,          {"label"} },
     { "wallet",             "getaddressinfo",               getaddressinfo,               {"address"} },
     { "wallet",             "getbalance",                   getbalance,                   {"dummy","minconf","include_watchonly","avoid_reuse"} },
     { "wallet",             "getnewaddress",                getnewaddress,                {"label", "address_type"} },
     { "wallet",             "getrawchangeaddress",          getrawchangeaddress,          {"address_type"} },
     { "wallet",             "getreceivedbyaddress",         getreceivedbyaddress,         {"address","minconf"} },
     { "wallet",             "getreceivedbylabel",           getreceivedbylabel,           {"label","minconf"} },
     { "wallet",             "gettransaction",               gettransaction,               {"txid","include_watchonly","verbose"} },
     { "wallet",             "getunconfirmedbalance",        getunconfirmedbalance,        {} },
     { "wallet",             "getbalances",                  getbalances,                  {} },
     { "wallet",             "getwalletinfo",                getwalletinfo,                {} },
     { "wallet",             "keypoolrefill",                keypoolrefill,                {"newsize"} },
     { "wallet",             "listaddressgroupings",         listaddressgroupings,         {} },
     { "wallet",             "listlabels",                   listlabels,                   {"purpose"} },
     { "wallet",             "listlockunspent",              listlockunspent,              {} },
     { "wallet",             "listreceivedbyaddress",        listreceivedbyaddress,        {"minconf","include_empty","include_watchonly","address_filter"} },
     { "wallet",             "listreceivedbylabel",          listreceivedbylabel,          {"minconf","include_empty","include_watchonly"} },
     { "wallet",             "listsinceblock",               listsinceblock,               {"blockhash","target_confirmations","include_watchonly","include_removed"} },
     { "wallet",             "listtransactions",             listtransactions,             {"label|dummy","count","skip","include_watchonly"} },
     { "wallet",             "listunspent",                  listunspent,                  {"minconf","maxconf","addresses","include_unsafe","query_options"} },
     { "wallet",             "listwalletdir",                listwalletdir,                {} },
     { "wallet",             "listwallets",                  listwallets,                  {} },
     { "wallet",             "loadwallet",                   loadwallet,                   {"filename"} },
     { "wallet",             "lockunspent",                  lockunspent,                  {"unlock","transactions"} },
     { "wallet",             "rescanblockchain",             rescanblockchain,             {"start_height", "stop_height"} },
     { "wallet",             "sendmany",                     sendmany,                     {"dummy","amounts","minconf","comment","subtractfeefrom"} },
     { "wallet",             "sendtoaddress",                sendtoaddress,                {"address","amount","comment","comment_to","subtractfeefromamount","avoid_reuse"} },
     { "wallet",             "sethdseed",                    sethdseed,                    {"newkeypool","seed"} },
     { "wallet",             "setlabel",                     setlabel,                     {"address","label"} },
     { "wallet",             "settxfee",                     settxfee,                     {"amount"} },
     { "wallet",             "setwalletflag",                setwalletflag,                {"flag","value"} },
     { "wallet",             "signmessage",                  signmessage,                  {"address","message"} },
     { "wallet",             "signrawtransactionwithwallet", signrawtransactionwithwallet, {"hexstring","prevtxs","sighashtype"} },
     { "wallet",             "unloadwallet",                 unloadwallet,                 {"wallet_name"} },
     { "wallet",             "upgradewallet",                upgradewallet,                {"version"} },
     { "wallet",             "walletcreatefundedpsbt",       walletcreatefundedpsbt,       {"inputs","outputs","locktime","options","bip32derivs"} },
     { "wallet",             "walletlock",                   walletlock,                   {} },
     { "wallet",             "walletpassphrase",             walletpassphrase,             {"passphrase","timeout"} },
     { "wallet",             "walletpassphrasechange",       walletpassphrasechange,       {"oldpassphrase","newpassphrase"} },
     { "wallet",             "walletprocesspsbt",            walletprocesspsbt,            {"psbt","sign","sighashtype","bip32derivs"} },
 };
 // clang-format on
 
 void RegisterWalletRPCCommands(
     interfaces::Chain &chain,
     std::vector<std::unique_ptr<interfaces::Handler>> &handlers) {
     for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) {
         handlers.emplace_back(chain.handleRpc(commands[vcidx]));
     }
 }
 
 interfaces::Chain *g_rpc_chain = nullptr;
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 93c524d18..4ff10648d 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -1,217 +1,217 @@
 // Copyright (c) 2015-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <zmq/zmqpublishnotifier.h>
 
+#include <blockdb.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <primitives/blockhash.h>
 #include <primitives/txid.h>
 #include <rpc/server.h>
 #include <streams.h>
 #include <util/system.h>
-#include <validation.h>
 
 #include <cstdarg>
 
 static std::multimap<std::string, CZMQAbstractPublishNotifier *>
     mapPublishNotifiers;
 
 static const char *MSG_HASHBLOCK = "hashblock";
 static const char *MSG_HASHTX = "hashtx";
 static const char *MSG_RAWBLOCK = "rawblock";
 static const char *MSG_RAWTX = "rawtx";
 
 // Internal function to send multipart message
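 // The variadic arguments are (data, size) pairs terminated by a nullptr data
 // pointer; every part except the last one is sent with the ZMQ_SNDMORE flag
 // so that the parts form a single multipart message.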
 static int zmq_send_multipart(void *sock, const void *data, size_t size, ...) {
     va_list args;
     va_start(args, size);
 
     while (1) {
         zmq_msg_t msg;
 
         int rc = zmq_msg_init_size(&msg, size);
         if (rc != 0) {
             zmqError("Unable to initialize ZMQ msg");
             va_end(args);
             return -1;
         }
 
         void *buf = zmq_msg_data(&msg);
         memcpy(buf, data, size);
 
         data = va_arg(args, const void *);
 
         rc = zmq_msg_send(&msg, sock, data ? ZMQ_SNDMORE : 0);
         if (rc == -1) {
             zmqError("Unable to send ZMQ msg");
             zmq_msg_close(&msg);
             va_end(args);
             return -1;
         }
 
         zmq_msg_close(&msg);
 
         if (!data) {
             break;
         }
 
         size = va_arg(args, size_t);
     }
     va_end(args);
     return 0;
 }
 
 bool CZMQAbstractPublishNotifier::Initialize(void *pcontext) {
     assert(!psocket);
 
     // Check whether the address is already used by another publish notifier
     std::multimap<std::string, CZMQAbstractPublishNotifier *>::iterator i =
         mapPublishNotifiers.find(address);
 
     if (i == mapPublishNotifiers.end()) {
         psocket = zmq_socket(pcontext, ZMQ_PUB);
         if (!psocket) {
             zmqError("Failed to create socket");
             return false;
         }
 
         LogPrint(BCLog::ZMQ,
                  "zmq: Outbound message high water mark for %s at %s is %d\n",
                  type, address, outbound_message_high_water_mark);
 
         int rc = zmq_setsockopt(psocket, ZMQ_SNDHWM,
                                 &outbound_message_high_water_mark,
                                 sizeof(outbound_message_high_water_mark));
         if (rc != 0) {
             zmqError("Failed to set outbound message high water mark");
             zmq_close(psocket);
             return false;
         }
 
         rc = zmq_bind(psocket, address.c_str());
         if (rc != 0) {
             zmqError("Failed to bind address");
             zmq_close(psocket);
             return false;
         }
 
         // register this notifier for the address, so the socket can be
         // reused by other publish notifiers
         mapPublishNotifiers.insert(std::make_pair(address, this));
         return true;
     } else {
         LogPrint(BCLog::ZMQ, "zmq: Reusing socket for address %s\n", address);
         LogPrint(BCLog::ZMQ,
                  "zmq: Outbound message high water mark for %s at %s is %d\n",
                  type, address, outbound_message_high_water_mark);
 
         psocket = i->second->psocket;
         mapPublishNotifiers.insert(std::make_pair(address, this));
 
         return true;
     }
 }
 
 void CZMQAbstractPublishNotifier::Shutdown() {
     // Early return if Initialize was not called
     if (!psocket) {
         return;
     }
 
     int count = mapPublishNotifiers.count(address);
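     // Remember how many notifiers currently share this address; if this was
     // the only one, the shared socket is closed once it is deregistered.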
 
     // remove this notifier from the list of publishers using this address
     typedef std::multimap<std::string, CZMQAbstractPublishNotifier *>::iterator
         iterator;
     std::pair<iterator, iterator> iterpair =
         mapPublishNotifiers.equal_range(address);
 
     for (iterator it = iterpair.first; it != iterpair.second; ++it) {
         if (it->second == this) {
             mapPublishNotifiers.erase(it);
             break;
         }
     }
 
     if (count == 1) {
         LogPrint(BCLog::ZMQ, "zmq: Close socket at address %s\n", address);
         int linger = 0;
         zmq_setsockopt(psocket, ZMQ_LINGER, &linger, sizeof(linger));
         zmq_close(psocket);
     }
 
     psocket = nullptr;
 }
 
 bool CZMQAbstractPublishNotifier::SendMessage(const char *command,
                                               const void *data, size_t size) {
     assert(psocket);
 
     /* send three parts: command, data, and a 4-byte LE sequence number */
     uint8_t msgseq[sizeof(uint32_t)];
     WriteLE32(&msgseq[0], nSequence);
     int rc = zmq_send_multipart(psocket, command, strlen(command), data, size,
                                 msgseq, (size_t)sizeof(uint32_t), nullptr);
     if (rc == -1) {
         return false;
     }
 
     /* increment the memory-only sequence number after sending */
     nSequence++;
 
     return true;
 }
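 
 // For reference, a subscriber receives each publication as three frames:
 // the topic (e.g. "rawtx"), the payload, and a 4-byte LE sequence number.
 // A minimal libzmq consumer sketch (illustrative only; the endpoint is an
 // example and must match the node's -zmqpub* configuration):
 //
 //   void *ctx = zmq_ctx_new();
 //   void *sub = zmq_socket(ctx, ZMQ_SUB);
 //   zmq_connect(sub, "tcp://127.0.0.1:28332");
 //   zmq_setsockopt(sub, ZMQ_SUBSCRIBE, "rawtx", strlen("rawtx"));
 //   char topic[16], seq[4];
 //   std::vector<char> body(32 << 20); // sized for the largest payload
 //   zmq_recv(sub, topic, sizeof(topic), 0);     // frame 1: topic
 //   zmq_recv(sub, body.data(), body.size(), 0); // frame 2: payload
 //   zmq_recv(sub, seq, sizeof(seq), 0);         // frame 3: sequence number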
 
 bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex) {
     BlockHash hash = pindex->GetBlockHash();
     LogPrint(BCLog::ZMQ, "zmq: Publish hashblock %s\n", hash.GetHex());
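     // The hash bytes are reversed so the published payload uses the
     // big-endian order shown by GetHex() and the RPC interface.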
     char data[32];
     for (unsigned int i = 0; i < 32; i++) {
         data[31 - i] = hash.begin()[i];
     }
     return SendMessage(MSG_HASHBLOCK, data, 32);
 }
 
 bool CZMQPublishHashTransactionNotifier::NotifyTransaction(
     const CTransaction &transaction) {
     TxId txid = transaction.GetId();
     LogPrint(BCLog::ZMQ, "zmq: Publish hashtx %s\n", txid.GetHex());
     char data[32];
     for (unsigned int i = 0; i < 32; i++) {
         data[31 - i] = txid.begin()[i];
     }
     return SendMessage(MSG_HASHTX, data, 32);
 }
 
 bool CZMQPublishRawBlockNotifier::NotifyBlock(const CBlockIndex *pindex) {
     LogPrint(BCLog::ZMQ, "zmq: Publish rawblock %s\n",
              pindex->GetBlockHash().GetHex());
 
     const Config &config = GetConfig();
     CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
     {
         LOCK(cs_main);
         CBlock block;
         if (!ReadBlockFromDisk(block, pindex,
                                config.GetChainParams().GetConsensus())) {
             zmqError("Can't read block from disk");
             return false;
         }
 
         ss << block;
     }
 
     return SendMessage(MSG_RAWBLOCK, &(*ss.begin()), ss.size());
 }
 
 bool CZMQPublishRawTransactionNotifier::NotifyTransaction(
     const CTransaction &transaction) {
     TxId txid = transaction.GetId();
     LogPrint(BCLog::ZMQ, "zmq: Publish rawtx %s\n", txid.GetHex());
     CDataStream ss(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags());
     ss << transaction;
     return SendMessage(MSG_RAWTX, &(*ss.begin()), ss.size());
 }