diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a22ed6dc3..69254eaa4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,1012 +1,1010 @@ # Copyright (c) 2017 The Bitcoin developers project(bitcoind) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) # Default visibility is hidden on all targets. set(CMAKE_C_VISIBILITY_PRESET hidden) set(CMAKE_CXX_VISIBILITY_PRESET hidden) option(BUILD_BITCOIN_WALLET "Activate the wallet functionality" ON) option(BUILD_BITCOIN_ZMQ "Activate the ZeroMQ functionalities" ON) option(BUILD_BITCOIN_CLI "Build bitcoin-cli" ON) option(BUILD_BITCOIN_TX "Build bitcoin-tx" ON) option(BUILD_BITCOIN_QT "Build bitcoin-qt" ON) option(BUILD_BITCOIN_SEEDER "Build bitcoin-seeder" ON) option(BUILD_LIBBITCOINCONSENSUS "Build the bitcoinconsenus shared library" ON) option(BUILD_BITCOIN_CHAINSTATE "Build bitcoin-chainstate" OFF) option(ENABLE_BIP70 "Enable BIP70 (payment protocol) support in GUI" ON) option(ENABLE_HARDENING "Harden the executables" ON) option(ENABLE_REDUCE_EXPORTS "Reduce the amount of exported symbols" OFF) option(ENABLE_STATIC_LIBSTDCXX "Statically link libstdc++" OFF) option(ENABLE_GLIBC_BACK_COMPAT "Enable Glibc compatibility features" OFF) option(ENABLE_QRCODE "Enable QR code display" ON) option(ENABLE_UPNP "Enable UPnP support" ON) option(START_WITH_UPNP "Make UPnP the default to map ports" OFF) option(ENABLE_NATPMP "Enable NAT-PMP support" ON) option(START_WITH_NATPMP "Make NAT-PMP the default to map ports" OFF) option(ENABLE_CLANG_TIDY "Enable clang-tidy checks for Bitcoin ABC" OFF) option(ENABLE_PROFILING "Select the profiling tool to use" OFF) option(ENABLE_TRACING "Enable eBPF user static defined tracepoints" OFF) # Linker option if(CMAKE_CROSSCOMPILING) set(DEFAULT_LINKER "") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set(DEFAULT_LINKER lld) elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set(DEFAULT_LINKER gold) else() set(DEFAULT_LINKER "") endif() set(USE_LINKER "${DEFAULT_LINKER}" CACHE STRING "Linker to be used (default: ${DEFAULT_LINKER}). Set to empty string to use the system's default.") set(OS_WITH_JEMALLOC_AS_SYSTEM_DEFAULT "Android" "FreeBSD" "NetBSD" ) if(NOT CMAKE_SYSTEM_NAME IN_LIST OS_WITH_JEMALLOC_AS_SYSTEM_DEFAULT) set(USE_JEMALLOC_DEFAULT ON) endif() # FIXME: Building against jemalloc causes the software to segfault on OSX. # See https://github.com/Bitcoin-ABC/bitcoin-abc/issues/401 if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING) set(USE_JEMALLOC_DEFAULT OFF) endif() option(USE_JEMALLOC "Use jemalloc as an allocation library" ${USE_JEMALLOC_DEFAULT}) if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") set(DEFAULT_ENABLE_DBUS_NOTIFICATIONS ON) endif() option(ENABLE_DBUS_NOTIFICATIONS "Enable DBus desktop notifications. Linux only." ${DEFAULT_ENABLE_DBUS_NOTIFICATIONS}) # If ccache is available, then use it. find_program(CCACHE ccache) if(CCACHE) message(STATUS "Using ccache: ${CCACHE}") set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE}) set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE}) endif(CCACHE) # Disable what we do not need for the native build. 
include(NativeExecutable) native_add_cmake_flags( "-DBUILD_BITCOIN_WALLET=OFF" "-DBUILD_BITCOIN_CHRONIK=OFF" "-DBUILD_BITCOIN_QT=OFF" "-DBUILD_BITCOIN_ZMQ=OFF" "-DENABLE_QRCODE=OFF" "-DENABLE_NATPMP=OFF" "-DENABLE_UPNP=OFF" "-DUSE_JEMALLOC=OFF" "-DENABLE_CLANG_TIDY=OFF" "-DENABLE_BIP70=OFF" "-DUSE_LINKER=" ) if(ENABLE_CLANG_TIDY) include(ClangTidy) endif() if(ENABLE_SANITIZERS) include(Sanitizers) enable_sanitizers(${ENABLE_SANITIZERS}) endif() include(AddCompilerFlags) if(USE_LINKER) set(LINKER_FLAG "-fuse-ld=${USE_LINKER}") custom_check_linker_flag(IS_LINKER_SUPPORTED ${LINKER_FLAG}) if(NOT IS_LINKER_SUPPORTED) message(FATAL_ERROR "The ${USE_LINKER} linker is not supported, make sure ${USE_LINKER} is properly installed or use -DUSE_LINKER= to use the system's linker") endif() add_linker_flags(${LINKER_FLAG}) # Remember the selected linker, it will be used for the subsequent # custom_check_linker_flag calls set(GLOBAL_LINKER_FLAGS ${LINKER_FLAG} CACHE INTERNAL "Additional linker flags for flag support checking") endif() # Prefer -g3, defaults to -g if unavailable foreach(LANGUAGE C CXX) set(COMPILER_DEBUG_LEVEL -g) check_compiler_flags(G3_IS_SUPPORTED ${LANGUAGE} -g3) if(${G3_IS_SUPPORTED}) set(COMPILER_DEBUG_LEVEL -g3) endif() add_compile_options_to_configuration_for_language(Debug ${LANGUAGE} ${COMPILER_DEBUG_LEVEL}) endforeach() # Define some debugging symbols when the Debug build type is selected. add_compile_definitions_to_configuration(Debug DEBUG DEBUG_LOCKORDER ABORT_ON_FAILED_ASSUME) # Add -ftrapv when building in Debug add_compile_options_to_configuration(Debug -ftrapv) # All versions of gcc that we commonly use for building are subject to bug # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set # -fstack-reuse=none for all gcc builds. (Only gcc understands this flag) if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") add_compiler_flags(-fstack-reuse=none) endif() if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") # Ensure that WINDRES_PREPROC is enabled when using windres. list(APPEND CMAKE_RC_FLAGS "-DWINDRES_PREPROC") # Build all static so there is no dll file to distribute. add_linker_flags(-static) add_compile_definitions( # Windows 7 _WIN32_WINNT=0x0601 # Internet Explorer 5.01 (!) _WIN32_IE=0x0501 # Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, # RPC, Shell, and Windows Sockets. WIN32_LEAN_AND_MEAN ) # We require Windows 7 (NT 6.1) or later add_linker_flags(-Wl,--major-subsystem-version,6 -Wl,--minor-subsystem-version,1) endif() if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") add_compile_definitions(MAC_OSX OBJC_OLD_DISPATCH_PROTOTYPES=0) add_linker_flags(-Wl,-dead_strip_dylibs) endif() if(ENABLE_REDUCE_EXPORTS) # Default visibility is set by CMAKE__VISIBILITY_PRESET, but this # doesn't tell if the visibility set is effective. # Check if the flag -fvisibility=hidden is supported, as using the hidden # visibility is a requirement to reduce exports. check_compiler_flags(HAS_CXX_FVISIBILITY CXX -fvisibility=hidden) if(NOT HAS_CXX_FVISIBILITY) message(FATAL_ERROR "Cannot set default symbol visibility. 
Use -DENABLE_REDUCE_EXPORTS=OFF.") endif() # Also hide symbols from static libraries add_linker_flags(-Wl,--exclude-libs,libstdc++) endif() # Enable statically linking libstdc++ if(ENABLE_STATIC_LIBSTDCXX) add_linker_flags(-static-libstdc++) endif() set(CMAKE_POSITION_INDEPENDENT_CODE ON) if(ENABLE_HARDENING) # Enable stack protection add_cxx_compiler_flags(-fstack-protector-all -Wstack-protector) # Enable control-flow enforcement add_cxx_compiler_flags(-fcf-protection=full) if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows") # stack-clash-protection does not work properly when building for Windows. # See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90458 add_cxx_compiler_flags(-fstack-clash-protection) endif() add_linker_flags(-Wl,-z,noexecstack) # Enable some buffer overflow checking, except in -O0 builds which # do not support them add_compiler_flags(-U_FORTIFY_SOURCE) add_compile_options($<$>:-D_FORTIFY_SOURCE=2>) # Enable ASLR (these flags are primarily targeting MinGw) add_linker_flags(-Wl,--enable-reloc-section -Wl,--dynamicbase -Wl,--nxcompat -Wl,--high-entropy-va) # Make the relocated sections read-only add_linker_flags(-Wl,-z,relro -Wl,-z,now) # Avoids mixing code pages with data to improve cache performance as well # as security add_linker_flags(-Wl,-z,separate-code) # CMake provides the POSITION_INDEPENDENT_CODE property to set PIC/PIE. cmake_policy(SET CMP0083 NEW) include(CheckPIESupported) check_pie_supported() if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") # MinGw provides its own libssp for stack smashing protection link_libraries(ssp) endif() endif() if(ENABLE_PROFILING MATCHES "gprof") message(STATUS "Enable profiling with gprof") # -pg is incompatible with -pie. Since hardening and profiling together # doesn't make sense, we simply make them mutually exclusive here. # Additionally, hardened toolchains may force -pie by default, in which # case it needs to be turned off with -no-pie. if(ENABLE_HARDENING) message(FATAL_ERROR "Profiling with gprof requires disabling hardening with -DENABLE_HARDENING=OFF.") endif() add_linker_flags(-no-pie) add_compiler_flags(-pg) add_linker_flags(-pg) endif() # Enable warning add_c_compiler_flags(-Wnested-externs -Wstrict-prototypes) add_compiler_flags( -Wall -Wextra -Wformat -Wgnu -Wvla -Wcast-align -Wunused-parameter -Wmissing-braces -Wthread-safety -Wrange-loop-analysis -Wredundant-decls -Wunreachable-code-loop-increment -Wsign-compare -Wconditional-uninitialized -Wduplicated-branches -Wduplicated-cond -Wlogical-op -Wdocumentation ) add_compiler_flag_group(-Wformat -Wformat-security) add_cxx_compiler_flags( -Wredundant-move -Woverloaded-virtual ) if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") # GCC has no flag variant which is granular enough to avoid raising the clang # -Wshadow-uncaptured-local equivalent. This is causing a lot of warnings # on serialize.h which cannot be disabled locally, so drop the flag. add_compiler_flags( -Wshadow -Wshadow-field ) endif() option(EXTRA_WARNINGS "Enable extra warnings" OFF) if(EXTRA_WARNINGS) add_cxx_compiler_flags(-Wsuggest-override) else() add_compiler_flags( -Wno-unused-parameter -Wno-implicit-fallthrough -Wno-psabi ) endif() # libtool style configure add_subdirectory(config) # Enable LFS (Large File Support) on targets that don't have it natively. # This should be defined before the libraries are included as leveldb need the # definition to be set. 
if(NOT HAVE_LARGE_FILE_SUPPORT) add_compile_definitions(_FILE_OFFSET_BITS=64) add_linker_flags(-Wl,--large-address-aware) endif() if(ENABLE_GLIBC_BACK_COMPAT) # Wrap some glibc functions with ours add_linker_flags(-Wl,--wrap=exp) add_linker_flags(-Wl,--wrap=log) add_linker_flags(-Wl,--wrap=log2) add_linker_flags(-Wl,--wrap=pow) if(NOT HAVE_LARGE_FILE_SUPPORT) add_linker_flags(-Wl,--wrap=fcntl -Wl,--wrap=fcntl64) endif() endif() if(USE_JEMALLOC) # Most of the sanitizers require their instrumented allocation functions to # be fully functional. This is obviously the case for all the memory related # sanitizers (asan, lsan, msan) but not only. if(ENABLE_SANITIZERS) message(WARNING "Jemalloc is incompatible with the sanitizers and has been disabled.") else() find_package(Jemalloc 3.6.0 REQUIRED) link_libraries(Jemalloc::jemalloc) endif() endif() # These flags are needed for using std::filesystem with GCC < 9.1 & Clang < 9.0 # Since these are optional libraries they need to be placed accordingly on the # command line. add_linker_flags(-lstdc++fs -lc++fs) custom_check_linker_flag(LINKER_HAS_STDCXXFS "-lstdc++fs") if(LINKER_HAS_STDCXXFS) link_libraries(stdc++fs) endif() custom_check_linker_flag(LINKER_HAS_CXXFS "-lc++fs") if(LINKER_HAS_CXXFS) link_libraries(c++fs) endif() # Make sure that all the global compiler and linker flags are set BEFORE # including the libraries so they apply as needed. # libraries add_subdirectory(crypto) add_subdirectory(leveldb) add_subdirectory(secp256k1) add_subdirectory(univalue) # Find the git root, and returns the full path to the .git/logs/HEAD file if # it exists. function(find_git_head_logs_file RESULT) find_package(Git) if(GIT_FOUND) execute_process( COMMAND "${GIT_EXECUTABLE}" "rev-parse" "--show-toplevel" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" OUTPUT_VARIABLE GIT_ROOT RESULT_VARIABLE GIT_RESULT OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET ) if(GIT_RESULT EQUAL 0) set(GIT_LOGS_DIR "${GIT_ROOT}/.git/logs") set(GIT_HEAD_LOGS_FILE "${GIT_LOGS_DIR}/HEAD") # If the .git/logs/HEAD does not exist, create it if(NOT EXISTS "${GIT_HEAD_LOGS_FILE}") file(MAKE_DIRECTORY "${GIT_LOGS_DIR}") file(TOUCH "${GIT_HEAD_LOGS_FILE}") endif() set(${RESULT} "${GIT_HEAD_LOGS_FILE}" PARENT_SCOPE) endif() endif() endfunction() find_git_head_logs_file(GIT_HEAD_LOGS_FILE) set(OBJ_DIR "${CMAKE_CURRENT_BINARY_DIR}/obj") file(MAKE_DIRECTORY "${OBJ_DIR}") set(BUILD_HEADER "${OBJ_DIR}/build.h") set(BUILD_HEADER_TMP "${BUILD_HEADER}.tmp") add_custom_command( DEPENDS "${GIT_HEAD_LOGS_FILE}" "${CMAKE_SOURCE_DIR}/share/genbuild.sh" OUTPUT "${BUILD_HEADER}" COMMAND "${CMAKE_SOURCE_DIR}/share/genbuild.sh" "${BUILD_HEADER_TMP}" "${CMAKE_SOURCE_DIR}" COMMAND ${CMAKE_COMMAND} -E copy_if_different "${BUILD_HEADER_TMP}" "${BUILD_HEADER}" COMMAND ${CMAKE_COMMAND} -E remove "${BUILD_HEADER_TMP}" ) # Because the Bitcoin ABc source code is disorganised, we # end up with a bunch of libraries without any apparent # cohesive structure. This is inherited from Bitcoin Core # and reflecting this. # TODO: Improve the structure once cmake is rocking. # Various completely unrelated features shared by all executables. 
add_library(util chainparamsbase.cpp clientversion.cpp compat/glibcxx_sanity.cpp compat/strnlen.cpp currencyunit.cpp fs.cpp interfaces/handler.cpp logging.cpp random.cpp randomenv.cpp rcu.cpp rpc/request.cpp support/cleanse.cpp support/lockedpool.cpp sync.cpp threadinterrupt.cpp uint256.cpp util/asmap.cpp util/bip32.cpp util/bytevectorhash.cpp util/check.cpp util/hasher.cpp util/error.cpp util/getuniquepath.cpp util/message.cpp util/moneystr.cpp util/readwritefile.cpp util/settings.cpp util/string.cpp util/sock.cpp util/spanparsing.cpp util/strencodings.cpp util/string.cpp util/system.cpp util/thread.cpp util/threadnames.cpp util/time.cpp util/tokenpipe.cpp util/url.cpp # obj/build.h "${BUILD_HEADER}" ) target_compile_definitions(util PUBLIC HAVE_CONFIG_H HAVE_BUILD_INFO) target_include_directories(util PUBLIC . # To access the config/ and obj/ directories ${CMAKE_CURRENT_BINARY_DIR} ) if(ENABLE_GLIBC_BACK_COMPAT) target_sources(util PRIVATE compat/glibc_compat.cpp) endif() # Work around jemalloc printing harmless warnings to stderr with qemu. # We match the emulator against qemu but not ^qemu so we are compatible with the # use of absolute paths. if(CMAKE_CROSSCOMPILING AND USE_JEMALLOC AND CMAKE_CROSSCOMPILING_EMULATOR MATCHES "qemu-.+") message(WARNING "Overriding Jemalloc malloc_message() function for use with QEMU.") target_sources(util PRIVATE jemalloc_message.cpp) endif() set(Boost_USE_STATIC_LIBS ON) macro(link_windows_dependencies TARGET) find_package(SHLWAPI REQUIRED) target_link_libraries(${TARGET} SHLWAPI::shlwapi) find_library(WS2_32_LIBRARY NAMES ws2_32) target_link_libraries(${TARGET} ${WS2_32_LIBRARY}) endmacro() # Target specific configs if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") set(Boost_USE_STATIC_RUNTIME ON) set(Boost_THREADAPI win32) link_windows_dependencies(util) endif() target_link_libraries(util univalue crypto) macro(link_event TARGET) non_native_target_link_libraries(${TARGET} Event 2.0.22 ${ARGN}) endmacro() link_event(util event) macro(link_boost_headers_only TARGET) non_native_target_link_headers_only(${TARGET} Boost 1.64 ${ARGN}) endmacro() link_boost_headers_only(util headers) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) target_link_libraries(util Threads::Threads) function(add_network_sources NETWORK_SOURCES) set(NETWORK_DIR abc) list(TRANSFORM ARGN PREPEND "networks/${NETWORK_DIR}/" OUTPUT_VARIABLE NETWORK_SOURCES ) set(NETWORK_SOURCES ${NETWORK_SOURCES} PARENT_SCOPE) endfunction() add_network_sources(NETWORK_SOURCES checkpoints.cpp chainparamsconstants.cpp ) # More completely unrelated features shared by all executables. 
# Because nothing says this is different from util than "common" add_library(common base58.cpp common/bloom.cpp cashaddr.cpp cashaddrenc.cpp chainparams.cpp config.cpp consensus/merkle.cpp coins.cpp compressor.cpp eventloop.cpp feerate.cpp core_read.cpp core_write.cpp key.cpp key_io.cpp merkleblock.cpp net_permissions.cpp netaddress.cpp netbase.cpp outputtype.cpp policy/policy.cpp primitives/block.cpp protocol.cpp psbt.cpp rpc/rawtransaction_util.cpp rpc/util.cpp scheduler.cpp warnings.cpp ${NETWORK_SOURCES} ) target_link_libraries(common bitcoinconsensus util secp256k1 script) # script library add_library(script script/bitfield.cpp script/descriptor.cpp script/interpreter.cpp script/script.cpp script/script_error.cpp script/sigencoding.cpp script/sign.cpp script/signingprovider.cpp script/standard.cpp ) target_link_libraries(script common) # libbitcoinconsensus add_library(bitcoinconsensus arith_uint256.cpp hash.cpp primitives/transaction.cpp pubkey.cpp uint256.cpp util/strencodings.cpp consensus/amount.cpp consensus/tx_check.cpp ) target_link_libraries(bitcoinconsensus script) include(InstallationHelper) if(BUILD_LIBBITCOINCONSENSUS) target_compile_definitions(bitcoinconsensus PUBLIC BUILD_BITCOIN_INTERNAL HAVE_CONSENSUS_LIB ) install_shared_library(bitcoinconsensus script/bitcoinconsensus.cpp PUBLIC_HEADER script/bitcoinconsensus.h ) endif() # Bitcoin server facilities add_library(server addrdb.cpp addrman.cpp avalanche/avalanche.cpp avalanche/compactproofs.cpp avalanche/delegation.cpp avalanche/delegationbuilder.cpp avalanche/peermanager.cpp avalanche/processor.cpp avalanche/proof.cpp avalanche/proofid.cpp avalanche/proofbuilder.cpp avalanche/proofpool.cpp avalanche/voterecord.cpp banman.cpp blockencodings.cpp blockfileinfo.cpp blockfilter.cpp blockindex.cpp chain.cpp checkpoints.cpp config.cpp consensus/activation.cpp consensus/tx_verify.cpp dbwrapper.cpp deploymentstatus.cpp dnsseeds.cpp flatfile.cpp headerssync.cpp httprpc.cpp httpserver.cpp i2p.cpp index/base.cpp index/blockfilterindex.cpp index/coinstatsindex.cpp index/txindex.cpp init.cpp init/common.cpp invrequest.cpp kernel/cs_main.cpp kernel/disconnected_transactions.cpp kernel/mempool_persist.cpp mapport.cpp mempool_args.cpp minerfund.cpp net.cpp net_processing.cpp node/blockmanager_args.cpp node/blockstorage.cpp node/caches.cpp node/chainstate.cpp node/chainstatemanager_args.cpp node/coin.cpp node/coinstats.cpp node/context.cpp node/interfaces.cpp node/mempool_persist_args.cpp node/miner.cpp node/psbt.cpp node/transaction.cpp node/ui_interface.cpp node/utxo_snapshot.cpp node/validation_cache_args.cpp noui.cpp policy/block/minerfund.cpp policy/block/preconsensus.cpp policy/block/stakingrewards.cpp policy/fees.cpp policy/packages.cpp policy/settings.cpp pow/aserti32d.cpp pow/daa.cpp pow/eda.cpp pow/grasberg.cpp pow/pow.cpp rest.cpp rpc/abc.cpp rpc/avalanche.cpp rpc/blockchain.cpp rpc/command.cpp rpc/mempool.cpp rpc/mining.cpp rpc/misc.cpp rpc/net.cpp rpc/rawtransaction.cpp rpc/server.cpp rpc/server_util.cpp rpc/txoutproof.cpp script/scriptcache.cpp script/sigcache.cpp shutdown.cpp timedata.cpp torcontrol.cpp txdb.cpp txmempool.cpp txorphanage.cpp validation.cpp validationinterface.cpp versionbits.cpp ) target_include_directories(server PRIVATE leveldb/helpers/memenv) target_link_libraries(server bitcoinconsensus leveldb memenv ) link_event(server event) if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows") link_event(server pthreads) endif() if(ENABLE_UPNP) find_package(MiniUPnPc 1.9 REQUIRED) target_link_libraries(server 
MiniUPnPc::miniupnpc) if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") # TODO: check if we are really using a static library. Assume this is # the one from the depends for now since the native windows build is not # supported. target_compile_definitions(server PUBLIC -DSTATICLIB PUBLIC -DMINIUPNP_STATICLIB ) endif() endif() if(ENABLE_NATPMP) find_package(NATPMP REQUIRED) target_link_libraries(server NATPMP::natpmp) if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") target_compile_definitions(server PUBLIC -DSTATICLIB PUBLIC -DNATPMP_STATICLIB ) endif() endif() # Test suites. add_subdirectory(test) add_subdirectory(avalanche/test) add_subdirectory(pow/test) # Benchmark suite. add_subdirectory(bench) include(BinaryTest) include(WindowsVersionInfo) # Wallet if(BUILD_BITCOIN_WALLET) add_subdirectory(wallet) target_link_libraries(server wallet) # bitcoin-wallet add_executable(bitcoin-wallet bitcoin-wallet.cpp) generate_windows_version_info(bitcoin-wallet DESCRIPTION "CLI tool for ${PACKAGE_NAME} wallets" ) target_link_libraries(bitcoin-wallet wallet-tool common util) add_to_symbols_check(bitcoin-wallet) add_to_security_check(bitcoin-wallet) install_target(bitcoin-wallet) install_manpages(bitcoin-wallet) else() target_sources(server PRIVATE dummywallet.cpp) endif() # ZeroMQ if(BUILD_BITCOIN_ZMQ) add_subdirectory(zmq) target_link_libraries(server zmq) # FIXME: This is needed because of an unwanted dependency: # zmqpublishnotifier.cpp -> blockstorage.h -> txdb.h -> dbwrapper.h -> leveldb/db.h target_link_libraries(zmq leveldb) endif() # RPC client support add_library(rpcclient compat/stdin.cpp rpc/client.cpp ) target_link_libraries(rpcclient univalue util) # bitcoin-seeder if(BUILD_BITCOIN_SEEDER) add_subdirectory(seeder) endif() # bitcoin-cli if(BUILD_BITCOIN_CLI) add_executable(bitcoin-cli bitcoin-cli.cpp) generate_windows_version_info(bitcoin-cli DESCRIPTION "JSON-RPC client for ${PACKAGE_NAME}" ) target_link_libraries(bitcoin-cli common rpcclient) link_event(bitcoin-cli event) add_to_symbols_check(bitcoin-cli) add_to_security_check(bitcoin-cli) install_target(bitcoin-cli) install_manpages(bitcoin-cli) endif() # bitcoin-tx if(BUILD_BITCOIN_TX) add_executable(bitcoin-tx bitcoin-tx.cpp) generate_windows_version_info(bitcoin-tx DESCRIPTION "CLI Bitcoin transaction editor utility" ) target_link_libraries(bitcoin-tx bitcoinconsensus) add_to_symbols_check(bitcoin-tx) add_to_security_check(bitcoin-tx) install_target(bitcoin-tx) install_manpages(bitcoin-tx) endif() # bitcoin-chainstate if(BUILD_BITCOIN_CHAINSTATE) # TODO: libbitcoinkernel is a work in progress consensus engine library, as more # and more modules are decoupled from the consensus engine, this list will # shrink to only those which are absolutely necessary. For example, things # like index/*.cpp will be removed. 
add_library(bitcoinkernel kernel/bitcoinkernel.cpp kernel/cs_main.cpp kernel/disconnected_transactions.cpp kernel/mempool_persist.cpp arith_uint256.cpp blockfileinfo.cpp - blockfilter.cpp blockindex.cpp common/bloom.cpp chain.cpp chainparamsbase.cpp chainparams.cpp checkpoints.cpp clientversion.cpp coins.cpp compat/glibcxx_sanity.cpp compressor.cpp config.cpp consensus/activation.cpp consensus/amount.cpp consensus/merkle.cpp consensus/tx_check.cpp consensus/tx_verify.cpp core_read.cpp dbwrapper.cpp deploymentstatus.cpp eventloop.cpp feerate.cpp flatfile.cpp fs.cpp hash.cpp index/base.cpp - index/blockfilterindex.cpp index/coinstatsindex.cpp init/common.cpp key.cpp logging.cpp networks/abc/chainparamsconstants.cpp networks/abc/checkpoints.cpp node/blockstorage.cpp node/chainstate.cpp node/coinstats.cpp node/ui_interface.cpp node/utxo_snapshot.cpp policy/fees.cpp policy/packages.cpp policy/policy.cpp policy/settings.cpp pow/aserti32d.cpp pow/daa.cpp pow/eda.cpp pow/pow.cpp primitives/block.cpp primitives/transaction.cpp pubkey.cpp random.cpp randomenv.cpp rcu.cpp scheduler.cpp script/bitfield.cpp script/interpreter.cpp script/script.cpp script/scriptcache.cpp script/script_error.cpp script/sigcache.cpp script/sigencoding.cpp script/standard.cpp shutdown.cpp support/cleanse.cpp support/lockedpool.cpp sync.cpp threadinterrupt.cpp txdb.cpp txmempool.cpp uint256.cpp util/bytevectorhash.cpp util/check.cpp util/getuniquepath.cpp util/hasher.cpp util/moneystr.cpp util/settings.cpp util/strencodings.cpp util/string.cpp util/system.cpp util/thread.cpp util/threadnames.cpp util/time.cpp util/tokenpipe.cpp validation.cpp validationinterface.cpp versionbits.cpp warnings.cpp # Bitcoin ABC specific dependencies that will not go away with Core backports addrdb.cpp # via banman.cpp addrman.cpp # via net.cpp banman.cpp # via net.cpp base58.cpp # via key_io.cpp avalanche/avalanche.cpp avalanche/delegation.cpp avalanche/delegationbuilder.cpp avalanche/peermanager.cpp avalanche/processor.cpp avalanche/proof.cpp avalanche/proofid.cpp avalanche/proofpool.cpp avalanche/voterecord.cpp cashaddr.cpp # via cashaddrenc.cpp cashaddrenc.cpp # via key_io.cpp dnsseeds.cpp # via net.cpp (GetRandomizedDNSSeeds) i2p.cpp # via net.cppTx key_io.cpp # avalanche/processor.cpp uses DecodeSecret minerfund.cpp # via policy/block/minerfund.cpp net.cpp # avalanche uses CConnman netaddress.cpp # via net.cpp netbase.cpp # via net.cpp net_permissions.cpp # via net.cpp policy/block/minerfund.cpp policy/block/preconsensus.cpp policy/block/stakingrewards.cpp protocol.cpp # avalanche/processor.cpp uses NetMsgType timedata.cpp # via net.cpp util/asmap.cpp # via netaddress.cpp util/error.cpp # via net_permissions.cpp (ResolveErrMsg) util/readwritefile.cpp # via i2p.cpp util/sock.cpp # via net.cpp ) target_include_directories(bitcoinkernel PUBLIC . 
leveldb/helpers/memenv # To access the config/ and obj/ directories ${CMAKE_CURRENT_BINARY_DIR} ) target_link_libraries(bitcoinkernel crypto univalue secp256k1 leveldb memenv) link_boost_headers_only(bitcoinkernel headers) if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") link_windows_dependencies(bitcoinkernel) endif() add_executable(bitcoin-chainstate bitcoin-chainstate.cpp) target_link_libraries(bitcoin-chainstate bitcoinkernel) generate_windows_version_info(bitcoin-chainstate DESCRIPTION "CLI Datadir information utility (experimental)" ) add_to_symbols_check(bitcoin-chainstate) add_to_security_check(bitcoin-chainstate) endif() # bitcoind add_executable(bitcoind bitcoind.cpp) target_link_libraries(bitcoind server) generate_windows_version_info(bitcoind DESCRIPTION "Bitcoin node with a JSON-RPC server" ) add_to_symbols_check(bitcoind) add_to_security_check(bitcoind) install_target(bitcoind) install_manpages(bitcoind) # Bitcoin-qt if(BUILD_BITCOIN_QT) add_subdirectory(qt) endif() diff --git a/src/index/base.cpp b/src/index/base.cpp index 203a79181..6bd2048cc 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -1,394 +1,417 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include // For Chainstate #include #include using node::ReadBlockFromDisk; constexpr uint8_t DB_BEST_BLOCK{'B'}; constexpr int64_t SYNC_LOG_INTERVAL = 30; // secon constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds template static void FatalError(const char *fmt, const Args &...args) { std::string strMessage = tfm::format(fmt, args...); SetMiscWarning(Untranslated(strMessage)); LogPrintf("*** %s\n", strMessage); AbortError(_("A fatal internal error occurred, see debug.log for details")); StartShutdown(); } BaseIndex::DB::DB(const fs::path &path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) : CDBWrapper(path, n_cache_size, f_memory, f_wipe, f_obfuscate) {} bool BaseIndex::DB::ReadBestBlock(CBlockLocator &locator) const { bool success = Read(DB_BEST_BLOCK, locator); if (!success) { locator.SetNull(); } return success; } void BaseIndex::DB::WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator) { batch.Write(DB_BEST_BLOCK, locator); } BaseIndex::~BaseIndex() { Interrupt(); Stop(); } bool BaseIndex::Init() { CBlockLocator locator; if (!GetDB().ReadBestBlock(locator)) { locator.SetNull(); } LOCK(cs_main); CChain &active_chain = m_chainstate->m_chain; if (locator.IsNull()) { - m_best_block_index = nullptr; + SetBestBlockIndex(nullptr); } else { - m_best_block_index = m_chainstate->FindForkInGlobalIndex(locator); + SetBestBlockIndex(m_chainstate->FindForkInGlobalIndex(locator)); } m_synced = m_best_block_index.load() == active_chain.Tip(); if (!m_synced) { bool prune_violation = false; if (!m_best_block_index) { // index is not built yet // make sure we have all block data back to the genesis prune_violation = node::GetFirstStoredBlock(active_chain.Tip()) != active_chain.Genesis(); } // in case the index has a best block set and is not fully synced // check if we have the required blocks to continue building the index else { const CBlockIndex *block_to_test = m_best_block_index.load(); if (!active_chain.Contains(block_to_test)) { // if the bestblock is not part of the mainchain, find the fork // and make sure we have all data down to the fork 
block_to_test = active_chain.FindFork(block_to_test); } const CBlockIndex *block = active_chain.Tip(); prune_violation = true; // check backwards from the tip if we have all block data until we // reach the indexes bestblock while (block_to_test && block && block->nStatus.hasData()) { if (block_to_test == block) { prune_violation = false; break; } // block->pprev must exist at this point, since block_to_test is // part of the chain and thus must be encountered when going // backwards from the tip assert(block->pprev); block = block->pprev; } } if (prune_violation) { return InitError(strprintf( Untranslated("%s best block of the index goes beyond pruned " "data. Please disable the index or reindex (which " "will download the whole blockchain again)"), GetName())); } } return true; } static const CBlockIndex *NextSyncBlock(const CBlockIndex *pindex_prev, CChain &chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (!pindex_prev) { return chain.Genesis(); } const CBlockIndex *pindex = chain.Next(pindex_prev); if (pindex) { return pindex; } return chain.Next(chain.FindFork(pindex_prev)); } void BaseIndex::ThreadSync() { const CBlockIndex *pindex = m_best_block_index.load(); if (!m_synced) { auto &consensus_params = GetConfig().GetChainParams().GetConsensus(); int64_t last_log_time = 0; int64_t last_locator_write_time = 0; while (true) { if (m_interrupt) { - m_best_block_index = pindex; + SetBestBlockIndex(pindex); // No need to handle errors in Commit. If it fails, the error // will be already be logged. The best way to recover is to // continue, as index cannot be corrupted by a missed commit to // disk for an advanced index state. Commit(); return; } { LOCK(cs_main); const CBlockIndex *pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain); if (!pindex_next) { - m_best_block_index = pindex; + SetBestBlockIndex(pindex); m_synced = true; // No need to handle errors in Commit. See rationale above. Commit(); break; } if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { FatalError( "%s: Failed to rewind index %s to a previous chain tip", __func__, GetName()); return; } pindex = pindex_next; } int64_t current_time = GetTime(); if (last_log_time + SYNC_LOG_INTERVAL < current_time) { LogPrintf("Syncing %s with block chain from height %d\n", GetName(), pindex->nHeight); last_log_time = current_time; } if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) { - m_best_block_index = pindex; + SetBestBlockIndex(pindex); last_locator_write_time = current_time; // No need to handle errors in Commit. See rationale above. Commit(); } CBlock block; if (!ReadBlockFromDisk(block, pindex, consensus_params)) { FatalError("%s: Failed to read block %s from disk", __func__, pindex->GetBlockHash().ToString()); return; } if (!WriteBlock(block, pindex)) { FatalError("%s: Failed to write block %s to index database", __func__, pindex->GetBlockHash().ToString()); return; } } } if (pindex) { LogPrintf("%s is enabled at height %d\n", GetName(), pindex->nHeight); } else { LogPrintf("%s is enabled\n", GetName()); } } bool BaseIndex::Commit() { CDBBatch batch(GetDB()); if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) { return error("%s: Failed to commit latest %s state", __func__, GetName()); } return true; } bool BaseIndex::CommitInternal(CDBBatch &batch) { LOCK(cs_main); // Don't commit anything if we haven't indexed any block yet // (this could happen if init is interrupted). 
if (m_best_block_index == nullptr) { return false; } GetDB().WriteBestBlock(batch, GetLocator(m_best_block_index)); return true; } bool BaseIndex::Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip) { assert(current_tip == m_best_block_index); assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); // In the case of a reorg, ensure persisted block locator is not stale. // Pruning has a minimum of 288 blocks-to-keep and getting the index // out of sync may be possible but a users fault. // In case we reorg beyond the pruned depth, ReadBlockFromDisk would // throw and lead to a graceful shutdown - m_best_block_index = new_tip; + SetBestBlockIndex(new_tip); if (!Commit()) { // If commit fails, revert the best block index to avoid corruption. - m_best_block_index = current_tip; + SetBestBlockIndex(current_tip); return false; } return true; } void BaseIndex::BlockConnected(const std::shared_ptr &block, const CBlockIndex *pindex) { if (!m_synced) { return; } const CBlockIndex *best_block_index = m_best_block_index.load(); if (!best_block_index) { if (pindex->nHeight != 0) { FatalError("%s: First block connected is not the genesis block " "(height=%d)", __func__, pindex->nHeight); return; } } else { // Ensure block connects to an ancestor of the current best block. This // should be the case most of the time, but may not be immediately after // the the sync thread catches up and sets m_synced. Consider the case // where there is a reorg and the blocks on the stale branch are in the // ValidationInterface queue backlog even after the sync thread has // caught up to the new chain tip. In this unlikely event, log a warning // and let the queue clear. if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) { LogPrintf("%s: WARNING: Block %s does not connect to an ancestor " "of known best chain (tip=%s); not updating index\n", __func__, pindex->GetBlockHash().ToString(), best_block_index->GetBlockHash().ToString()); return; } if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) { FatalError("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName()); return; } } if (WriteBlock(*block, pindex)) { - m_best_block_index = pindex; + // Setting the best block index is intentionally the last step of this + // function, so BlockUntilSyncedToCurrentChain callers waiting for the + // best block index to be updated can rely on the block being fully + // processed, and the index object being safe to delete. + SetBestBlockIndex(pindex); } else { FatalError("%s: Failed to write block %s to index", __func__, pindex->GetBlockHash().ToString()); return; } } void BaseIndex::ChainStateFlushed(const CBlockLocator &locator) { if (!m_synced) { return; } const BlockHash &locator_tip_hash = locator.vHave.front(); const CBlockIndex *locator_tip_index; { LOCK(cs_main); locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash); } if (!locator_tip_index) { FatalError("%s: First block (hash=%s) in locator was not found", __func__, locator_tip_hash.ToString()); return; } // This checks that ChainStateFlushed callbacks are received after // BlockConnected. The check may fail immediately after the the sync thread // catches up and sets m_synced. Consider the case where there is a reorg // and the blocks on the stale branch are in the ValidationInterface queue // backlog even after the sync thread has caught up to the new chain tip. In // this unlikely event, log a warning and let the queue clear. 
const CBlockIndex *best_block_index = m_best_block_index.load(); if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) { LogPrintf("%s: WARNING: Locator contains block (hash=%s) not on known " "best chain (tip=%s); not writing index locator\n", __func__, locator_tip_hash.ToString(), best_block_index->GetBlockHash().ToString()); return; } // No need to handle errors in Commit. If it fails, the error will be // already be logged. The best way to recover is to continue, as index // cannot be corrupted by a missed commit to disk for an advanced index // state. Commit(); } bool BaseIndex::BlockUntilSyncedToCurrentChain() const { AssertLockNotHeld(cs_main); if (!m_synced) { return false; } { // Skip the queue-draining stuff if we know we're caught up with // m_chain.Tip(). LOCK(cs_main); const CBlockIndex *chain_tip = m_chainstate->m_chain.Tip(); const CBlockIndex *best_block_index = m_best_block_index.load(); if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) { return true; } } LogPrintf("%s: %s is catching up on block notifications\n", __func__, GetName()); SyncWithValidationInterfaceQueue(); return true; } void BaseIndex::Interrupt() { m_interrupt(); } bool BaseIndex::Start(Chainstate &active_chainstate) { m_chainstate = &active_chainstate; // Need to register this ValidationInterface before running Init(), so that // callbacks are not missed if Init sets m_synced to true. RegisterValidationInterface(this); if (!Init()) { return false; } m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); }); return true; } void BaseIndex::Stop() { UnregisterValidationInterface(this); if (m_thread_sync.joinable()) { m_thread_sync.join(); } } IndexSummary BaseIndex::GetSummary() const { IndexSummary summary{}; summary.name = GetName(); summary.synced = m_synced; summary.best_block_height = m_best_block_index ? m_best_block_index.load()->nHeight : 0; return summary; } + +void BaseIndex::SetBestBlockIndex(const CBlockIndex *block) { + assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune()); + + if (AllowPrune() && block) { + node::PruneLockInfo prune_lock; + prune_lock.height_first = block->nHeight; + WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock( + GetName(), prune_lock)); + } + + // Intentionally set m_best_block_index as the last step in this function, + // after updating prune locks above, and after making any other references + // to *this, so the BlockUntilSyncedToCurrentChain function (which checks + // m_best_block_index as an optimization) can be used to wait for the last + // BlockConnected notification and safely assume that prune locks are + // updated and that the index object is safe to delete. + m_best_block_index = block; +} diff --git a/src/index/base.h b/src/index/base.h index 999a60757..b5900fadb 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -1,135 +1,140 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_INDEX_BASE_H #define BITCOIN_INDEX_BASE_H #include #include #include class CBlock; class CBlockIndex; class Chainstate; struct IndexSummary { std::string name; bool synced{false}; int best_block_height{0}; }; /** * Base class for indices of blockchain data. This implements * CValidationInterface and ensures blocks are indexed sequentially according * to their position in the active chain. 
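 *
 * When a subclass allows pruning (AllowPrune() returns true),
 * SetBestBlockIndex() also registers a prune lock with the node's
 * BlockManager, so block data from the index's best block onward is kept on
 * disk even while the node is pruning (see SetBestBlockIndex below and
 * BlockManager::UpdatePruneLock).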
*/ class BaseIndex : public CValidationInterface { protected: /** * The database stores a block locator of the chain the database is synced * to so that the TxIndex can efficiently determine the point it last * stopped at. A locator is used instead of a simple hash of the chain tip * because blocks and block index entries may not be flushed to disk until * after this database is updated. */ class DB : public CDBWrapper { public: DB(const fs::path &path, size_t n_cache_size, bool f_memory = false, bool f_wipe = false, bool f_obfuscate = false); /// Read block locator of the chain that the index is in sync with. bool ReadBestBlock(CBlockLocator &locator) const; /// Write block locator of the chain that the index is in sync with. void WriteBestBlock(CDBBatch &batch, const CBlockLocator &locator); }; private: /// Whether the index is in sync with the main chain. The flag is flipped /// from false to true once, after which point this starts processing /// ValidationInterface notifications to stay in sync. std::atomic m_synced{false}; /// The last block in the chain that the index is in sync with. std::atomic m_best_block_index{nullptr}; std::thread m_thread_sync; CThreadInterrupt m_interrupt; /// Sync the index with the block index starting from the current best /// block. Intended to be run in its own thread, m_thread_sync, and can be /// interrupted with m_interrupt. Once the index gets in sync, the m_synced /// flag is set and the BlockConnected ValidationInterface callback takes /// over and the sync thread exits. void ThreadSync(); /// Write the current index state (eg. chain block locator and /// subclass-specific items) to disk. /// /// Recommendations for error handling: /// If called on a successor of the previous committed best block in the /// index, the index can continue processing without risk of corruption, /// though the index state will need to catch up from further behind on /// reboot. If the new state is not a successor of the previous state (due /// to a chain reorganization), the index must halt until Commit succeeds or /// else it could end up getting corrupted. bool Commit(); + virtual bool AllowPrune() const = 0; + protected: Chainstate *m_chainstate{nullptr}; void BlockConnected(const std::shared_ptr &block, const CBlockIndex *pindex) override; void ChainStateFlushed(const CBlockLocator &locator) override; const CBlockIndex *CurrentIndex() { return m_best_block_index.load(); }; /// Initialize internal state from the database and block index. [[nodiscard]] virtual bool Init(); /// Write update index entries for a newly connected block. virtual bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) { return true; } /// Virtual method called internally by Commit that can be overridden to /// atomically commit more index state. virtual bool CommitInternal(CDBBatch &batch); /// Rewind index to an earlier chain tip during a chain reorg. The tip must /// be an ancestor of the current best block. virtual bool Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip); virtual DB &GetDB() const = 0; /// Get the name of the index for display in logs. virtual const char *GetName() const = 0; + /// Update the internal best block index as well as the prune lock. + void SetBestBlockIndex(const CBlockIndex *block); + public: /// Destructor interrupts sync thread if running and blocks until it exits. virtual ~BaseIndex(); /// Blocks the current thread until the index is caught up to the current /// state of the block chain. 
This only blocks if the index has gotten in /// sync once and only needs to process blocks in the ValidationInterface /// queue. If the index is catching up from far behind, this method does /// not block and immediately returns false. bool BlockUntilSyncedToCurrentChain() const LOCKS_EXCLUDED(::cs_main); void Interrupt(); /// Start initializes the sync state and registers the instance as a /// ValidationInterface so that it stays in sync with blockchain updates. [[nodiscard]] bool Start(Chainstate &active_chainstate); /// Stops the instance from staying in sync with blockchain updates. void Stop(); /// Get a summary of the index and its state. IndexSummary GetSummary() const; }; #endif // BITCOIN_INDEX_BASE_H diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h index d6a89769f..585b15fb3 100644 --- a/src/index/blockfilterindex.h +++ b/src/index/blockfilterindex.h @@ -1,112 +1,114 @@ // Copyright (c) 2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_INDEX_BLOCKFILTERINDEX_H #define BITCOIN_INDEX_BLOCKFILTERINDEX_H #include #include #include #include #include static const char *const DEFAULT_BLOCKFILTERINDEX = "0"; /** Interval between compact filter checkpoints. See BIP 157. */ static constexpr int CFCHECKPT_INTERVAL = 1000; /** * BlockFilterIndex is used to store and retrieve block filters, hashes, and * headers for a range of blocks by height. An index is constructed for each * supported filter type with its own database (ie. filter data for different * types are stored in separate databases). * * This index is used to serve BIP 157 net requests. */ class BlockFilterIndex final : public BaseIndex { private: BlockFilterType m_filter_type; std::string m_name; std::unique_ptr m_db; FlatFilePos m_next_filter_pos; std::unique_ptr m_filter_fileseq; bool ReadFilterFromDisk(const FlatFilePos &pos, BlockFilter &filter) const; size_t WriteFilterToDisk(FlatFilePos &pos, const BlockFilter &filter); Mutex m_cs_headers_cache; /** * Cache of block hash to filter header, to avoid disk access when * responding to getcfcheckpt. */ std::unordered_map m_headers_cache GUARDED_BY(m_cs_headers_cache); + bool AllowPrune() const override { return true; } + protected: bool Init() override; bool CommitInternal(CDBBatch &batch) override; bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override; bool Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip) override; BaseIndex::DB &GetDB() const override { return *m_db; } const char *GetName() const override { return m_name.c_str(); } public: /** Constructs the index, which becomes available to be queried. */ explicit BlockFilterIndex(BlockFilterType filter_type, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); BlockFilterType GetFilterType() const { return m_filter_type; } /** Get a single filter by block. */ bool LookupFilter(const CBlockIndex *block_index, BlockFilter &filter_out) const; /** Get a single filter header by block. */ bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache); /** Get a range of filters between two heights on a chain. */ bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector &filters_out) const; /** Get a range of filter hashes between two heights on a chain. 
*/ bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector &hashes_out) const; }; /** * Get a block filter index by type. Returns nullptr if index has not been * initialized or was already destroyed. */ BlockFilterIndex *GetBlockFilterIndex(BlockFilterType filter_type); /** Iterate over all running block filter indexes, invoking fn on each. */ void ForEachBlockFilterIndex(std::function fn); /** * Initialize a block filter index for the given type if one does not already * exist. Returns true if a new index is created and false if one has already * been initialized. */ bool InitBlockFilterIndex(BlockFilterType filter_type, size_t n_cache_size, bool f_memory = false, bool f_wipe = false); /** * Destroy the block filter index with the given type. Returns false if no such * index exists. This just releases the allocated memory and closes the database * connection, it does not delete the index data. */ bool DestroyBlockFilterIndex(BlockFilterType filter_type); /** Destroy all open block filter indexes. */ void DestroyAllBlockFilterIndexes(); #endif // BITCOIN_INDEX_BLOCKFILTERINDEX_H diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h index 0ad1c8d25..53a1c9236 100644 --- a/src/index/coinstatsindex.h +++ b/src/index/coinstatsindex.h @@ -1,69 +1,71 @@ // Copyright (c) 2020-2021 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_INDEX_COINSTATSINDEX_H #define BITCOIN_INDEX_COINSTATSINDEX_H #include #include #include #include #include struct Amount; static constexpr bool DEFAULT_COINSTATSINDEX{false}; /** * CoinStatsIndex maintains statistics on the UTXO set. */ class CoinStatsIndex final : public BaseIndex { private: std::string m_name; std::unique_ptr m_db; MuHash3072 m_muhash; uint64_t m_transaction_output_count{0}; uint64_t m_bogo_size{0}; Amount m_total_amount{Amount::zero()}; Amount m_total_subsidy{Amount::zero()}; Amount m_total_unspendable_amount{Amount::zero()}; Amount m_total_prevout_spent_amount{Amount::zero()}; Amount m_total_new_outputs_ex_coinbase_amount{Amount::zero()}; Amount m_total_coinbase_amount{Amount::zero()}; Amount m_total_unspendables_genesis_block{Amount::zero()}; Amount m_total_unspendables_bip30{Amount::zero()}; Amount m_total_unspendables_scripts{Amount::zero()}; Amount m_total_unspendables_unclaimed_rewards{Amount::zero()}; bool ReverseBlock(const CBlock &block, const CBlockIndex *pindex); + bool AllowPrune() const override { return true; } + protected: bool Init() override; bool CommitInternal(CDBBatch &batch) override; bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override; bool Rewind(const CBlockIndex *current_tip, const CBlockIndex *new_tip) override; BaseIndex::DB &GetDB() const override { return *m_db; } const char *GetName() const override { return "coinstatsindex"; } public: // Constructs the index, which becomes available to be queried. explicit CoinStatsIndex(size_t n_cache_size, bool f_memory = false, bool f_wipe = false); // Look up stats for a specific block using CBlockIndex std::optional LookUpStats(const CBlockIndex *block_index) const; }; /// The global UTXO set hash object. 
extern std::unique_ptr g_coin_stats_index; #endif // BITCOIN_INDEX_COINSTATSINDEX_H diff --git a/src/index/txindex.h b/src/index/txindex.h index 017a0d599..f7c641405 100644 --- a/src/index/txindex.h +++ b/src/index/txindex.h @@ -1,59 +1,61 @@ // Copyright (c) 2017-2018 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_INDEX_TXINDEX_H #define BITCOIN_INDEX_TXINDEX_H #include #include struct BlockHash; struct TxId; static constexpr bool DEFAULT_TXINDEX{false}; /** * TxIndex is used to look up transactions included in the blockchain by ID. * The index is written to a LevelDB database and records the filesystem * location of each transaction by transaction ID. */ class TxIndex final : public BaseIndex { protected: class DB; private: const std::unique_ptr m_db; + bool AllowPrune() const override { return false; } + protected: bool WriteBlock(const CBlock &block, const CBlockIndex *pindex) override; BaseIndex::DB &GetDB() const override; const char *GetName() const override { return "txindex"; } public: /// Constructs the index, which becomes available to be queried. explicit TxIndex(size_t n_cache_size, bool f_memory = false, bool f_wipe = false); // Destructor is declared because this class contains a unique_ptr to an // incomplete type. virtual ~TxIndex() override; /// Look up a transaction by identifier. /// /// @param[in] txid The ID of the transaction to be returned. /// @param[out] block_hash The hash of the block the transaction is found /// in. /// @param[out] tx The transaction itself. /// @return true if transaction is found, false otherwise bool FindTx(const TxId &txid, BlockHash &block_hash, CTransactionRef &tx) const; }; /// The global transaction index, used in GetTransaction. May be null. extern std::unique_ptr g_txindex; #endif // BITCOIN_INDEX_TXINDEX_H diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index d2b5137fc..e8274306b 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1,999 +1,1005 @@ // Copyright (c) 2011-2022 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace node { std::atomic_bool fReindex(false); static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false); static FlatFileSeq BlockFileSeq(); static FlatFileSeq UndoFileSeq(); std::vector BlockManager::GetAllBlockIndices() { AssertLockHeld(cs_main); std::vector rv; rv.reserve(m_block_index.size()); for (auto &[_, block_index] : m_block_index) { rv.push_back(&block_index); } return rv; } CBlockIndex *BlockManager::LookupBlockIndex(const BlockHash &hash) { AssertLockHeld(cs_main); BlockMap::iterator it = m_block_index.find(hash); return it == m_block_index.end() ? nullptr : &it->second; } const CBlockIndex *BlockManager::LookupBlockIndex(const BlockHash &hash) const { AssertLockHeld(cs_main); BlockMap::const_iterator it = m_block_index.find(hash); return it == m_block_index.end() ? 
nullptr : &it->second; } CBlockIndex *BlockManager::AddToBlockIndex(const CBlockHeader &block, CBlockIndex *&best_header) { AssertLockHeld(cs_main); const auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block); if (!inserted) { return &mi->second; } CBlockIndex *pindexNew = &(*mi).second; // We assign the sequence id to blocks only when the full data is available, // to avoid miners withholding blocks but broadcasting headers, to get a // competitive advantage. pindexNew->nSequenceId = 0; pindexNew->phashBlock = &((*mi).first); BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock); if (miPrev != m_block_index.end()) { pindexNew->pprev = &(*miPrev).second; pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->BuildSkip(); } pindexNew->nTimeReceived = GetTime(); pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BlockValidity::TREE); if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) { best_header = pindexNew; } m_dirty_blockindex.insert(pindexNew); return pindexNew; } void BlockManager::PruneOneBlockFile(const int fileNumber) { AssertLockHeld(cs_main); LOCK(cs_LastBlockFile); for (auto &entry : m_block_index) { CBlockIndex *pindex = &entry.second; if (pindex->nFile == fileNumber) { pindex->nStatus = pindex->nStatus.withData(false).withUndo(false); pindex->nFile = 0; pindex->nDataPos = 0; pindex->nUndoPos = 0; m_dirty_blockindex.insert(pindex); // Prune from m_blocks_unlinked -- any block we prune would have // to be downloaded again in order to consider its chain, at which // point it would be considered as a candidate for // m_blocks_unlinked or setBlockIndexCandidates. 
auto range = m_blocks_unlinked.equal_range(pindex->pprev); while (range.first != range.second) { std::multimap::iterator _it = range.first; range.first++; if (_it->second == pindex) { m_blocks_unlinked.erase(_it); } } } } m_blockfile_info[fileNumber].SetNull(); m_dirty_fileinfo.insert(fileNumber); } void BlockManager::FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight, int chain_tip_height) { assert(IsPruneMode() && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); if (chain_tip_height < 0) { return; } // last block to prune is the lesser of (user-specified height, // MIN_BLOCKS_TO_KEEP from the tip) unsigned int nLastBlockWeCanPrune{std::min( (unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP)}; int count = 0; for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); } void BlockManager::FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd) { LOCK2(cs_main, cs_LastBlockFile); if (chain_tip_height < 0 || GetPruneTarget() == 0) { return; } if (uint64_t(chain_tip_height) <= nPruneAfterHeight) { return; } unsigned int nLastBlockWeCanPrune = std::min( prune_height, chain_tip_height - static_cast(MIN_BLOCKS_TO_KEEP)); uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files, // so we should leave a buffer under our target to account for another // allocation before the next pruning. uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; uint64_t nBytesToPrune; int count = 0; if (nCurrentUsage + nBuffer >= GetPruneTarget()) { // On a prune event, the chainstate DB is flushed. // To avoid excessive prune events negating the benefit of high dbcache // values, we should not prune too rapidly. // So when pruning in IBD, increase the buffer a bit to avoid a re-prune // too soon. if (is_ibd) { // Since this is only relevant during IBD, we use a fixed 10% nBuffer += GetPruneTarget() / 10; } for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize; if (m_blockfile_info[fileNumber].nSize == 0) { continue; } // are we below our target? 
if (nCurrentUsage + nBuffer < GetPruneTarget()) { break; } // don't prune files that could have a block within // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { continue; } PruneOneBlockFile(fileNumber); // Queue up the files for removal setFilesToPrune.insert(fileNumber); nCurrentUsage -= nBytesToPrune; count++; } } LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB " "max_prune_height=%d removed %d blk/rev pairs\n", GetPruneTarget() / 1024 / 1024, nCurrentUsage / 1024 / 1024, (int64_t(GetPruneTarget()) - int64_t(nCurrentUsage)) / 1024 / 1024, nLastBlockWeCanPrune, count); } +void BlockManager::UpdatePruneLock(const std::string &name, + const PruneLockInfo &lock_info) { + AssertLockHeld(::cs_main); + m_prune_locks[name] = lock_info; +} + CBlockIndex *BlockManager::InsertBlockIndex(const BlockHash &hash) { AssertLockHeld(cs_main); if (hash.IsNull()) { return nullptr; } const auto [mi, inserted] = m_block_index.try_emplace(hash); CBlockIndex *pindex = &(*mi).second; if (inserted) { pindex->phashBlock = &((*mi).first); } return pindex; } bool BlockManager::LoadBlockIndex() { AssertLockHeld(cs_main); if (!m_block_tree_db->LoadBlockIndexGuts( GetConsensus(), [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) { return false; } // Calculate nChainWork std::vector vSortedByHeight{GetAllBlockIndices()}; std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), CBlockIndexHeightOnlyComparator()); for (CBlockIndex *pindex : vSortedByHeight) { if (ShutdownRequested()) { return false; } pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime); // We can link the chain of blocks for which we've received // transactions at some point, or blocks that are assumed-valid on the // basis of snapshot load (see PopulateAndValidateSnapshot()). // Pruned nodes may have deleted the block. 
if (pindex->nTx > 0) { if (!pindex->UpdateChainStats() && pindex->pprev) { m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex)); } } if (!pindex->nStatus.hasFailed() && pindex->pprev && pindex->pprev->nStatus.hasFailed()) { pindex->nStatus = pindex->nStatus.withFailedParent(); m_dirty_blockindex.insert(pindex); } if (pindex->pprev) { pindex->BuildSkip(); } } return true; } bool BlockManager::WriteBlockIndexDB() { std::vector> vFiles; vFiles.reserve(m_dirty_fileinfo.size()); for (int i : m_dirty_fileinfo) { vFiles.push_back(std::make_pair(i, &m_blockfile_info[i])); } m_dirty_fileinfo.clear(); std::vector vBlocks; vBlocks.reserve(m_dirty_blockindex.size()); for (const CBlockIndex *cbi : m_dirty_blockindex) { vBlocks.push_back(cbi); } m_dirty_blockindex.clear(); if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) { return false; } return true; } bool BlockManager::LoadBlockIndexDB() { if (!LoadBlockIndex()) { return false; } // Load block file info m_block_tree_db->ReadLastBlockFile(m_last_blockfile); m_blockfile_info.resize(m_last_blockfile + 1); LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile); for (int nFile = 0; nFile <= m_last_blockfile; nFile++) { m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]); } LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString()); for (int nFile = m_last_blockfile + 1; true; nFile++) { CBlockFileInfo info; if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) { m_blockfile_info.push_back(info); } else { break; } } // Check presence of blk files LogPrintf("Checking all blk files are present...\n"); std::set setBlkDataFiles; for (const auto &[_, block_index] : m_block_index) { if (block_index.nStatus.hasData()) { setBlkDataFiles.insert(block_index.nFile); } } for (const int i : setBlkDataFiles) { FlatFilePos pos(i, 0); if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION) .IsNull()) { return false; } } // Check whether we have ever pruned block & undo files m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned); if (m_have_pruned) { LogPrintf( "LoadBlockIndexDB(): Block files have previously been pruned\n"); } // Check whether we need to continue reindexing if (m_block_tree_db->IsReindexing()) { fReindex = true; } return true; } const CBlockIndex * BlockManager::GetLastCheckpoint(const CCheckpointData &data) { const MapCheckpoints &checkpoints = data.mapCheckpoints; for (const MapCheckpoints::value_type &i : reverse_iterate(checkpoints)) { const BlockHash &hash = i.second; const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { return pindex; } } return nullptr; } bool BlockManager::IsBlockPruned(const CBlockIndex *pblockindex) { AssertLockHeld(::cs_main); return (m_have_pruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0); } const CBlockIndex *GetFirstStoredBlock(const CBlockIndex *start_block) { AssertLockHeld(::cs_main); assert(start_block); const CBlockIndex *last_block = start_block; while (last_block->pprev && (last_block->pprev->nStatus.hasData())) { last_block = last_block->pprev; } return last_block; } // If we're using -prune with -reindex, then delete block files that will be // ignored by the reindex. Since reindexing works by starting at block file 0 // and looping until a blockfile is missing, do the same here to delete any // later block files after a gap. Also delete all rev files since they'll be // rewritten by the reindex anyway. 
This ensures that m_blockfile_info is in // sync with what's actually on disk by the time we start downloading, so that // pruning works correctly. void CleanupBlockRevFiles() { std::map mapBlockFiles; // Glob all blk?????.dat and rev?????.dat files from the blocks directory. // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for " "-reindex with -prune\n"); for (const auto &file : fs::directory_iterator{gArgs.GetBlocksDirPath()}) { const std::string path = fs::PathToString(file.path().filename()); if (fs::is_regular_file(file) && path.length() == 12 && path.substr(8, 4) == ".dat") { if (path.substr(0, 3) == "blk") { mapBlockFiles[path.substr(3, 5)] = file.path(); } else if (path.substr(0, 3) == "rev") { remove(file.path()); } } } // Remove all block files that aren't part of a contiguous set starting at // zero by walking the ordered map (keys are block file indices) by keeping // a separate counter. Once we hit a gap (or if 0 doesn't exist) start // removing block files. int contiguousCounter = 0; for (const auto &item : mapBlockFiles) { if (atoi(item.first) == contiguousCounter) { contiguousCounter++; continue; } remove(item.second); } } CBlockFileInfo *BlockManager::GetBlockFileInfo(size_t n) { LOCK(cs_LastBlockFile); return &m_blockfile_info.at(n); } static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos, const BlockHash &hashBlock, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Write index header unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion()); fileout << messageStart << nSize; // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("%s: ftell failed", __func__); } pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; // calculate & write checksum CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); hasher << hashBlock; hasher << blockundo; fileout << hasher.GetHash(); return true; } bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) { const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())}; if (pos.IsNull()) { return error("%s: no undo data available", __func__); } // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("%s: OpenUndoFile failed", __func__); } // Read block uint256 hashChecksum; // We need a CHashVerifier as reserializing may lose data CHashVerifier verifier(&filein); try { verifier << pindex->pprev->GetBlockHash(); verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s", __func__, e.what()); } // Verify checksum if (hashChecksum != verifier.GetHash()) { return error("%s: Checksum mismatch", __func__); } return true; } void BlockManager::FlushUndoFile(int block_file, bool finalize) { FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize); if (!UndoFileSeq().Flush(undo_pos_old, finalize)) { AbortNode("Flushing undo file to disk failed. This is likely the " "result of an I/O error."); } } void BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) { LOCK(cs_LastBlockFile); if (m_blockfile_info.empty()) { // Return if we haven't loaded any blockfiles yet. 
This happens during // chainstate init, when we call // ChainstateManager::MaybeRebalanceCaches() (which then calls // FlushStateToDisk()), resulting in a call to this function before we // have populated `m_blockfile_info` via LoadBlockIndexDB(). return; } assert(static_cast(m_blockfile_info.size()) > m_last_blockfile); FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize); if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) { AbortNode("Flushing block file to disk failed. This is likely the " "result of an I/O error."); } // we do not always flush the undo file, as the chain tip may be lagging // behind the incoming blocks, // e.g. during IBD or a sync after a node going offline if (!fFinalize || finalize_undo) { FlushUndoFile(m_last_blockfile, finalize_undo); } } uint64_t BlockManager::CalculateCurrentUsage() { LOCK(cs_LastBlockFile); uint64_t retval = 0; for (const CBlockFileInfo &file : m_blockfile_info) { retval += file.nSize + file.nUndoSize; } return retval; } void UnlinkPrunedFiles(const std::set &setFilesToPrune) { for (const int i : setFilesToPrune) { FlatFilePos pos(i, 0); fs::remove(BlockFileSeq().FileName(pos)); fs::remove(UndoFileSeq().FileName(pos)); LogPrint(BCLog::BLOCKSTORE, "Prune: %s deleted blk/rev (%05u)\n", __func__, i); } } static FlatFileSeq BlockFileSeq() { return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE); } static FlatFileSeq UndoFileSeq() { return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE); } FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) { return BlockFileSeq().Open(pos, fReadOnly); } /** Open an undo file (rev?????.dat) */ static FILE *OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) { return UndoFileSeq().Open(pos, fReadOnly); } fs::path GetBlockPosFilename(const FlatFilePos &pos) { return BlockFileSeq().FileName(pos); } bool BlockManager::FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, CChain &active_chain, uint64_t nTime, bool fKnown) { LOCK(cs_LastBlockFile); unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile; if (m_blockfile_info.size() <= nFile) { m_blockfile_info.resize(nFile + 1); } bool finalize_undo = false; if (!fKnown) { unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE}; // Use smaller blockfiles in test-only -fastprune mode - but avoid // the possibility of having a block not fit into the block file. 
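        // Illustrative sketch (hypothetical helper, not part of this patch)
        // of the naming convention used by the BlockFileSeq()/UndoFileSeq()
        // helpers above: a FlatFilePos file number maps to blk?????.dat for
        // block data and rev?????.dat for the matching undo data, so
        // blk00003.dat always pairs with rev00003.dat.
        // static std::string FlatFileName(const char *prefix,
        //                                 unsigned int file_number) {
        //     // e.g. FlatFileName("blk", 3) == "blk00003.dat"
        //     //      FlatFileName("rev", 3) == "rev00003.dat"
        //     return strprintf("%s%05u.dat", prefix, file_number);
        // }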
if (gArgs.GetBoolArg("-fastprune", false)) { max_blockfile_size = 0x10000; // 64kiB if (nAddSize >= max_blockfile_size) { // dynamically adjust the blockfile size to be larger than the // added size max_blockfile_size = nAddSize + 1; } } // TODO: we will also need to dynamically adjust the blockfile size // or raise MAX_BLOCKFILE_SIZE when we reach block sizes larger than // 128 MiB assert(nAddSize < max_blockfile_size); while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { // when the undo file is keeping up with the block file, we want to // flush it explicitly when it is lagging behind (more blocks arrive // than are being connected), we let the undo block write case // handle it finalize_undo = (m_blockfile_info[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight); nFile++; if (m_blockfile_info.size() <= nFile) { m_blockfile_info.resize(nFile + 1); } } pos.nFile = nFile; pos.nPos = m_blockfile_info[nFile].nSize; } if ((int)nFile != m_last_blockfile) { if (!fKnown) { LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString()); } FlushBlockFile(!fKnown, finalize_undo); m_last_blockfile = nFile; } m_blockfile_info[nFile].AddBlock(nHeight, nTime); if (fKnown) { m_blockfile_info[nFile].nSize = std::max(pos.nPos + nAddSize, m_blockfile_info[nFile].nSize); } else { m_blockfile_info[nFile].nSize += nAddSize; } if (!fKnown) { bool out_of_space; size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); if (out_of_space) { return AbortNode("Disk space is too low!", _("Disk space is too low!")); } if (bytes_allocated != 0 && IsPruneMode()) { m_check_for_pruning = true; } } m_dirty_fileinfo.insert(nFile); return true; } bool BlockManager::FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize) { pos.nFile = nFile; LOCK(cs_LastBlockFile); pos.nPos = m_blockfile_info[nFile].nUndoSize; m_blockfile_info[nFile].nUndoSize += nAddSize; m_dirty_fileinfo.insert(nFile); bool out_of_space; size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space); if (out_of_space) { return AbortNode(state, "Disk space is too low!", _("Disk space is too low!")); } if (bytes_allocated != 0 && IsPruneMode()) { m_check_for_pruning = true; } return true; } static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos, const CMessageHeader::MessageMagic &messageStart) { // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { return error("WriteBlockToDisk: OpenBlockFile failed"); } // Write index header unsigned int nSize = GetSerializeSize(block, fileout.GetVersion()); fileout << messageStart << nSize; // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { return error("WriteBlockToDisk: ftell failed"); } pos.nPos = (unsigned int)fileOutPos; fileout << block; return true; } bool BlockManager::WriteUndoDataForBlock(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) { AssertLockHeld(::cs_main); // Write undo information to disk if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) { return error("ConnectBlock(): FindUndoPos failed"); } if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash(), GetParams().DiskMagic())) { return AbortNode(state, "Failed to write undo data"); } // rev files are written in block height order, whereas blk files are 
// written as blocks come in (often out of order) we want to flush the // rev (undo) file once we've written the last block, which is indicated // by the last height in the block file info as below; note that this // does not catch the case where the undo writes are keeping up with the // block writes (usually when a synced up node is getting newly mined // blocks) -- this case is caught in the FindBlockPos function if (_pos.nFile < m_last_blockfile && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { FlushUndoFile(_pos.nFile, true); } // update nUndoPos in block index block.nUndoPos = _pos.nPos; block.nStatus = block.nStatus.withUndo(); m_dirty_blockindex.insert(&block); } return true; } bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params ¶ms) { block.SetNull(); // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); } // Read block try { filein >> block; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } // Check the header if (!CheckProofOfWork(block.GetHash(), block.nBits, params)) { return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); } return true; } bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex, const Consensus::Params ¶ms) { const FlatFilePos block_pos{ WITH_LOCK(cs_main, return pindex->GetBlockPos())}; if (!ReadBlockFromDisk(block, block_pos, params)) { return false; } if (block.GetHash() != pindex->GetBlockHash()) { return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() " "doesn't match index for %s at %s", pindex->ToString(), block_pos.ToString()); } return true; } bool ReadTxFromDisk(CMutableTransaction &tx, const FlatFilePos &pos) { // Open history file to read CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadTxFromDisk: OpenBlockFile failed for %s", pos.ToString()); } // Read tx try { filein >> tx; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } return true; } bool ReadTxUndoFromDisk(CTxUndo &tx_undo, const FlatFilePos &pos) { // Open undo file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { return error("ReadTxUndoFromDisk: OpenUndoFile failed for %s", pos.ToString()); } // Read undo data try { filein >> tx_undo; } catch (const std::exception &e) { return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); } return true; } FlatFilePos BlockManager::SaveBlockToDisk(const CBlock &block, int nHeight, CChain &active_chain, const FlatFilePos *dbp) { unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION); FlatFilePos blockPos; const auto position_known{dbp != nullptr}; if (position_known) { blockPos = *dbp; } else { // When known, blockPos.nPos points at the offset of the block data in // the blk file. That already accounts for the serialization header // present in the file (the 4 magic message start bytes + the 4 length // bytes = 8 bytes = BLOCK_SERIALIZATION_HEADER_SIZE). We add // BLOCK_SERIALIZATION_HEADER_SIZE only for new blocks since they will // have the serialization header added when written to disk. 
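        // For reference, a sketch of the on-disk layout being accounted for
        // here (constant name below is hypothetical): every serialized block
        // in a blk?????.dat file is preceded by 4 network-magic bytes and a
        // 4-byte length field, and nPos points just past that header.
        //
        //   offset 0 : 4 bytes  message start (network magic)
        //   offset 4 : 4 bytes  serialized block size
        //   offset 8 : block data  <- nPos for a newly written block
        //
        // constexpr size_t ILLUSTRATIVE_HEADER_SIZE =
        //     4 + 4; // == BLOCK_SERIALIZATION_HEADER_SIZE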
nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); } if (!FindBlockPos(blockPos, nBlockSize, nHeight, active_chain, block.GetBlockTime(), position_known)) { error("%s: FindBlockPos failed", __func__); return FlatFilePos(); } if (!position_known) { if (!WriteBlockToDisk(block, blockPos, GetParams().DiskMagic())) { AbortNode("Failed to write block"); return FlatFilePos(); } } return blockPos; } class ImportingNow { std::atomic &m_importing; public: ImportingNow(std::atomic &importing) : m_importing{importing} { assert(m_importing == false); m_importing = true; } ~ImportingNow() { assert(m_importing == true); m_importing = false; } }; void ThreadImport(ChainstateManager &chainman, std::vector vImportFiles, const ArgsManager &args, const fs::path &mempool_path) { ScheduleBatchPriority(); { ImportingNow imp{chainman.m_blockman.m_importing}; // -reindex if (fReindex) { int nFile = 0; // Map of disk positions for blocks with unknown parent (only used // for reindex); parent hash -> child disk position, multiple // children can have the same parent. std::multimap blocks_with_unknown_parent; while (true) { FlatFilePos pos(nFile, 0); if (!fs::exists(GetBlockPosFilename(pos))) { // No block files left to reindex break; } FILE *file = OpenBlockFile(pos, true); if (!file) { // This error is logged in OpenBlockFile break; } LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); chainman.ActiveChainstate().LoadExternalBlockFile( file, &pos, &blocks_with_unknown_parent); if (ShutdownRequested()) { LogPrintf("Shutdown requested. Exit %s\n", __func__); return; } nFile++; } WITH_LOCK( ::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false)); fReindex = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try // initializing (no-op if reindexing worked): chainman.ActiveChainstate().LoadGenesisBlock(); } // -loadblock= for (const fs::path &path : vImportFiles) { FILE *file = fsbridge::fopen(path, "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", fs::PathToString(path)); chainman.ActiveChainstate().LoadExternalBlockFile(file); if (ShutdownRequested()) { LogPrintf("Shutdown requested. Exit %s\n", __func__); return; } } else { LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); } } // Reconsider blocks we know are valid. They may have been marked // invalid by, for instance, running an outdated version of the node // software. const MapCheckpoints &checkpoints = chainman.GetParams().Checkpoints().mapCheckpoints; for (const MapCheckpoints::value_type &i : checkpoints) { const BlockHash &hash = i.second; LOCK(cs_main); CBlockIndex *pblockindex = chainman.m_blockman.LookupBlockIndex(hash); if (pblockindex && !pblockindex->nStatus.isValid()) { LogPrintf("Reconsidering checkpointed block %s ...\n", hash.GetHex()); chainman.ActiveChainstate().ResetBlockFailureFlags(pblockindex); } if (pblockindex && pblockindex->nStatus.isOnParkedChain()) { LogPrintf("Unparking checkpointed block %s ...\n", hash.GetHex()); chainman.ActiveChainstate().UnparkBlockAndChildren(pblockindex); } } // scan for better chains in the block chain database, that are not yet // connected in the active best chain // We can't hold cs_main during ActivateBestChain even though we're // accessing the chainman unique_ptrs since ABC requires us not to be // holding cs_main, so retrieve the relevant pointers before the ABC // call. 
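        // Sketch of the locking pattern applied below (hypothetical helper,
        // not part of this patch): copy the chainstate pointers while cs_main
        // is held, release it, then run ActivateBestChain without the lock,
        // since ABC must not be entered while cs_main is held.
        // static void ActivateAllChainstates(ChainstateManager &chainman) {
        //     std::vector<Chainstate *> chainstates =
        //         WITH_LOCK(::cs_main, return chainman.GetAll());
        //     for (Chainstate *chainstate : chainstates) {
        //         BlockValidationState state;
        //         if (!chainstate->ActivateBestChain(state, nullptr)) {
        //             return; // error handling omitted in this sketch
        //         }
        //     }
        // }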
for (Chainstate *chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { BlockValidationState state; if (!chainstate->ActivateBestChain(state, nullptr)) { LogPrintf("Failed to connect best block (%s)\n", state.ToString()); StartShutdown(); return; } } if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); return; } } // End scope of ImportingNow chainman.ActiveChainstate().LoadMempool(mempool_path); } } // namespace node diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 57979d280..25f5a3273 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -1,275 +1,280 @@ // Copyright (c) 2011-2021 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NODE_BLOCKSTORAGE_H #define BITCOIN_NODE_BLOCKSTORAGE_H #include #include #include #include #include #include #include #include #include // For CMessageHeader::MessageStartChars #include #include class ArgsManager; class BlockValidationState; class CBlock; class CBlockFileInfo; class CBlockHeader; class CBlockUndo; class CChain; class CChainParams; class CTxUndo; class Chainstate; class ChainstateManager; struct CCheckpointData; class Config; struct FlatFilePos; namespace Consensus { struct Params; } namespace node { static constexpr bool DEFAULT_STOPAFTERBLOCKIMPORT{false}; /** The pre-allocation chunk size for blk?????.dat files (since 0.8) */ static constexpr unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB /** The pre-allocation chunk size for rev?????.dat files (since 0.8) */ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB /** The maximum size of a blk?????.dat file (since 0.8) */ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB /** Size of header written by WriteBlockToDisk before a serialized CBlock */ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = CMessageHeader::MESSAGE_START_SIZE + sizeof(unsigned int); extern std::atomic_bool fReindex; // Because validation code takes pointers to the map's CBlockIndex objects, if // we ever switch to another associative container, we need to either use a // container that has stable addressing (true of all std associative // containers), or make the key a `std::unique_ptr` using BlockMap = std::unordered_map; struct PruneLockInfo { //! Height of earliest block that should be kept and not pruned int height_first{std::numeric_limits::max()}; }; /** * Maintains a tree of blocks (stored in `m_block_index`) which is consulted * to determine where the most-work tip is. * * This data is used mostly in `Chainstate` - information about, e.g., * candidate tips is not maintained here. */ class BlockManager { friend Chainstate; friend ChainstateManager; private: const CChainParams &GetParams() const { return m_opts.chainparams; } const Consensus::Params &GetConsensus() const { return m_opts.chainparams.GetConsensus(); } /** * Load the blocktree off disk and into memory. Populate certain metadata * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as * peripheral collections like m_dirty_blockindex. 
*/ bool LoadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main); void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false); void FlushUndoFile(int block_file, bool finalize = false); bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, CChain &active_chain, uint64_t nTime, bool fKnown); bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize); /** * Calculate the block/rev files to delete based on height specified * by user with RPC command pruneblockchain */ void FindFilesToPruneManual(std::set &setFilesToPrune, int nManualPruneHeight, int chain_tip_height); /** * Prune block and undo files (blk???.dat and undo???.dat) so that the disk * space used is less than a user-defined target. The user sets the target * (in MB) on the command line or in config file. This will be run on * startup and whenever new space is allocated in a block or undo file, * staying below the target. Changing back to unpruned requires a reindex * (which in this case means the blockchain must be re-downloaded.) * * Pruning functions are called from FlushStateToDisk when the * m_check_for_pruning flag has been set. Block and undo files are deleted * in lock-step (when blk00003.dat is deleted, so is rev00003.dat.) Pruning * cannot take place until the longest chain is at least a certain length * (CChainParams::nPruneAfterHeight). Pruning will never delete a block * within a defined distance (currently 288) from the active chain's tip. * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any * blocks that were stored in the deleted files. A db flag records the fact * that at least some block files have been pruned. * * @param[out] setFilesToPrune The set of file indices that can be * unlinked will be returned */ void FindFilesToPrune(std::set &setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd); RecursiveMutex cs_LastBlockFile; std::vector m_blockfile_info; int m_last_blockfile = 0; /** * Global flag to indicate we should check to see if there are * block/undo files that should be deleted. Set on startup * or if we allocate more file space when we're in prune mode */ bool m_check_for_pruning = false; const bool m_prune_mode; /** Dirty block index entries. */ std::set m_dirty_blockindex; /** Dirty block file entries. */ std::set m_dirty_fileinfo; /** * Map from external index name to oldest block that must not be pruned. * * @note Internally, only blocks at height * (height_first - PRUNE_LOCK_BUFFER - 1) and below will be pruned, * but callers should avoid assuming any particular buffer size. */ std::unordered_map m_prune_locks GUARDED_BY(::cs_main); const kernel::BlockManagerOpts m_opts; public: using Options = kernel::BlockManagerOpts; explicit BlockManager(Options opts) : m_prune_mode{opts.prune_target > 0}, m_opts{std::move(opts)} {}; std::atomic m_importing{false}; BlockMap m_block_index GUARDED_BY(cs_main); std::vector GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** * All pairs A->B, where A (or one of its ancestors) misses transactions, * but B has transactions. Pruned nodes may have entries where B is missing * data. 
*/ std::multimap m_blocks_unlinked; std::unique_ptr m_block_tree_db GUARDED_BY(::cs_main); bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); bool LoadBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); CBlockIndex *AddToBlockIndex(const CBlockHeader &block, CBlockIndex *&best_header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Create a new block index entry for a given block hash */ CBlockIndex *InsertBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); //! Mark one block file as pruned (modify associated database entries) void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main); CBlockIndex *LookupBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); const CBlockIndex *LookupBlockIndex(const BlockHash &hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Get block file info entry for one block file */ CBlockFileInfo *GetBlockFileInfo(size_t n); bool WriteUndoDataForBlock(const CBlockUndo &blockundo, BlockValidationState &state, CBlockIndex &block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** * Store block on disk. If dbp is not nullptr, then it provides the known * position of the block within a block file on disk. */ FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight, CChain &active_chain, const FlatFilePos *dbp); /** Whether running in -prune mode. */ [[nodiscard]] bool IsPruneMode() const { return m_prune_mode; } /** Attempt to stay below this number of bytes of block files. */ [[nodiscard]] uint64_t GetPruneTarget() const { return m_opts.prune_target; } static constexpr auto PRUNE_TARGET_MANUAL{ std::numeric_limits::max()}; [[nodiscard]] bool LoadingBlocks() const { return m_importing || fReindex; } /** * Calculate the amount of disk space the block & undo files currently use */ uint64_t CalculateCurrentUsage(); //! Returns last CBlockIndex* that is a checkpoint const CBlockIndex *GetLastCheckpoint(const CCheckpointData &data) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** True if any block files have ever been pruned. */ bool m_have_pruned = false; //! Check whether the block associated with this index entry is pruned or //! not. bool IsBlockPruned(const CBlockIndex *pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + + //! Create or update a prune lock identified by its name + void UpdatePruneLock(const std::string &name, + const PruneLockInfo &lock_info) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main); }; //! Find the first block that is not pruned const CBlockIndex *GetFirstStoredBlock(const CBlockIndex *start_block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); void CleanupBlockRevFiles(); /** Open a block file (blk?????.dat) */ FILE *OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false); /** Translation to a filesystem path. 
*/ fs::path GetBlockPosFilename(const FlatFilePos &pos); /** * Actually unlink the specified files */ void UnlinkPrunedFiles(const std::set &setFilesToPrune); /** Functions for disk access for blocks */ bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &consensusParams); bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex, const Consensus::Params &consensusParams); bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex); /** Functions for disk access for txs */ bool ReadTxFromDisk(CMutableTransaction &tx, const FlatFilePos &pos); bool ReadTxUndoFromDisk(CTxUndo &tx, const FlatFilePos &pos); void ThreadImport(ChainstateManager &chainman, std::vector vImportFiles, const ArgsManager &args, const fs::path &mempool_path); } // namespace node #endif // BITCOIN_NODE_BLOCKSTORAGE_H diff --git a/src/validation.cpp b/src/validation.cpp index 18708d619..e43a7664b 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1,6629 +1,6623 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2018 The Bitcoin Core developers // Copyright (c) 2017-2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include