diff --git a/doc/release-notes.md b/doc/release-notes.md
index 707f2357f8..43e1e3fb9a 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -1,65 +1,67 @@
(note: this is a temporary file, to be added to by anybody, and moved to
release-notes at release time)
Notable changes
===============
Example item
----------------
bitcoin-cli: arguments privacy
--------------------------------
The RPC command line client gained a new argument, `-stdin`, to read extra
arguments from standard input, one per line until EOF/Ctrl-D.
For example:
$ echo -e "mysecretcode\n120" | src/bitcoin-cli -stdin walletpassphrase
It is recommended to use this for sensitive information such as wallet
passphrases, as command-line arguments can usually be read from the process
table by any user on the system.
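As a further illustration (a sketch, not taken verbatim from the upstream notes), the same
pattern works for any RPC whose arguments are secrets, for example importing a private key;
the key below is only a placeholder:

$ echo "<private key in WIF format>" | src/bitcoin-cli -stdin importprivkey

Only `-stdin importprivkey` appears in the process table; the key itself is read from the pipe.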
0.13.0 Change log
=================
Detailed release notes follow. This overview includes changes that affect
behavior, not code moves, refactors and string updates. For convenience in locating
the code changes and accompanying discussion, both the pull request and
git merge commit are mentioned.
### RPC and REST
Asm script outputs now contain OP_CHECKLOCKTIMEVERIFY in place of OP_NOP2
-------------------------------------------------------------------------
OP_NOP2 has been renamed to OP_CHECKLOCKTIMEVERIFY by [BIP
65](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki).
The following outputs are affected by this change:
- RPC `getrawtransaction` (in verbose mode)
- RPC `decoderawtransaction`
- RPC `decodescript`
- REST `/rest/tx/` (JSON format)
- REST `/rest/block/` (JSON format when including extended tx details)
- `bitcoin-tx -json`
### Configuration and command-line options
### Block and transaction handling
### P2P protocol and network code
+The P2P alert system has been removed in #7692 and the `alert` message is no longer supported.
+
### Validation
### Build system
### Wallet
### GUI
### Tests
### Miscellaneous
diff --git a/src/Makefile.am b/src/Makefile.am
index 7765ea43ed..6ad7aabae0 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1,499 +1,497 @@
DIST_SUBDIRS = secp256k1 univalue
AM_LDFLAGS = $(PTHREAD_CFLAGS) $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS)
AM_CXXFLAGS = $(HARDENED_CXXFLAGS)
AM_CPPFLAGS = $(HARDENED_CPPFLAGS)
if EMBEDDED_UNIVALUE
LIBUNIVALUE = univalue/libunivalue.la
$(LIBUNIVALUE): $(wildcard univalue/lib/*) $(wildcard univalue/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
else
LIBUNIVALUE = $(UNIVALUE_LIBS)
endif
if EMBEDDED_LEVELDB
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/include
LEVELDB_CPPFLAGS += -I$(srcdir)/leveldb/helpers/memenv
LIBLEVELDB += $(builddir)/leveldb/libleveldb.a
LIBMEMENV += $(builddir)/leveldb/libmemenv.a
# NOTE: This dependency is not strictly necessary, but without it make may try to build both in parallel, which breaks the LevelDB build system in a race
$(LIBLEVELDB): $(LIBMEMENV)
$(LIBLEVELDB) $(LIBMEMENV):
@echo "Building LevelDB ..." && $(MAKE) -C $(@D) $(@F) CXX="$(CXX)" \
CC="$(CC)" PLATFORM=$(TARGET_OS) AR="$(AR)" $(LEVELDB_TARGET_FLAGS) \
OPT="$(AM_CXXFLAGS) $(PIE_FLAGS) $(CXXFLAGS) $(AM_CPPFLAGS) $(CPPFLAGS) -D__STDC_LIMIT_MACROS"
endif
BITCOIN_CONFIG_INCLUDES=-I$(builddir)/config
BITCOIN_INCLUDES=-I$(builddir) -I$(builddir)/obj $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS) $(CRYPTO_CFLAGS) $(SSL_CFLAGS)
BITCOIN_INCLUDES += -I$(srcdir)/secp256k1/include
BITCOIN_INCLUDES += $(UNIVALUE_CFLAGS)
LIBBITCOIN_SERVER=libbitcoin_server.a
LIBBITCOIN_WALLET=libbitcoin_wallet.a
LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
LIBBITCOIN_UTIL=libbitcoin_util.a
LIBBITCOIN_CRYPTO=crypto/libbitcoin_crypto.a
LIBBITCOINQT=qt/libbitcoinqt.a
LIBSECP256K1=secp256k1/libsecp256k1.la
$(LIBSECP256K1): $(wildcard secp256k1/src/*) $(wildcard secp256k1/include/*)
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
# Make is not made aware of per-object dependencies to avoid limiting building parallelization
# But to build the less dependent modules first, we manually select their order here:
EXTRA_LIBRARIES = \
crypto/libbitcoin_crypto.a \
libbitcoin_util.a \
libbitcoin_common.a \
libbitcoin_consensus.a \
libbitcoin_server.a \
libbitcoin_cli.a
if ENABLE_WALLET
BITCOIN_INCLUDES += $(BDB_CPPFLAGS)
EXTRA_LIBRARIES += libbitcoin_wallet.a
endif
if ENABLE_ZMQ
EXTRA_LIBRARIES += libbitcoin_zmq.a
endif
if BUILD_BITCOIN_LIBS
lib_LTLIBRARIES = libbitcoinconsensus.la
LIBBITCOINCONSENSUS=libbitcoinconsensus.la
else
LIBBITCOINCONSENSUS=
endif
bin_PROGRAMS =
TESTS =
BENCHMARKS =
if BUILD_BITCOIND
bin_PROGRAMS += bitcoind
endif
if BUILD_BITCOIN_UTILS
bin_PROGRAMS += bitcoin-cli bitcoin-tx
endif
.PHONY: FORCE check-symbols check-security
# bitcoin core #
BITCOIN_CORE_H = \
addrman.h \
- alert.h \
base58.h \
bloom.h \
chain.h \
chainparams.h \
chainparamsbase.h \
chainparamsseeds.h \
checkpoints.h \
checkqueue.h \
clientversion.h \
coincontrol.h \
coins.h \
compat.h \
compat/byteswap.h \
compat/endian.h \
compat/sanity.h \
compressor.h \
consensus/consensus.h \
core_io.h \
core_memusage.h \
httprpc.h \
httpserver.h \
init.h \
key.h \
keystore.h \
dbwrapper.h \
limitedmap.h \
main.h \
memusage.h \
merkleblock.h \
miner.h \
net.h \
netbase.h \
noui.h \
policy/fees.h \
policy/policy.h \
policy/rbf.h \
pow.h \
protocol.h \
random.h \
reverselock.h \
rpc/client.h \
rpc/protocol.h \
rpc/server.h \
scheduler.h \
script/sigcache.h \
script/sign.h \
script/standard.h \
streams.h \
support/allocators/secure.h \
support/allocators/zeroafterfree.h \
support/cleanse.h \
support/pagelocker.h \
sync.h \
threadsafety.h \
timedata.h \
torcontrol.h \
txdb.h \
txmempool.h \
ui_interface.h \
undo.h \
util.h \
utilmoneystr.h \
utiltime.h \
validationinterface.h \
versionbits.h \
wallet/crypter.h \
wallet/db.h \
wallet/rpcwallet.h \
wallet/wallet.h \
wallet/wallet_ismine.h \
wallet/walletdb.h \
zmq/zmqabstractnotifier.h \
zmq/zmqconfig.h\
zmq/zmqnotificationinterface.h \
zmq/zmqpublishnotifier.h
obj/build.h: FORCE
@$(MKDIR_P) $(builddir)/obj
@$(top_srcdir)/share/genbuild.sh $(abs_top_builddir)/src/obj/build.h \
$(abs_top_srcdir)
libbitcoin_util_a-clientversion.$(OBJEXT): obj/build.h
# server: shared between bitcoind and bitcoin-qt
libbitcoin_server_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS)
libbitcoin_server_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_server_a_SOURCES = \
addrman.cpp \
- alert.cpp \
bloom.cpp \
chain.cpp \
checkpoints.cpp \
httprpc.cpp \
httpserver.cpp \
init.cpp \
dbwrapper.cpp \
main.cpp \
merkleblock.cpp \
miner.cpp \
net.cpp \
noui.cpp \
policy/fees.cpp \
policy/policy.cpp \
pow.cpp \
rest.cpp \
rpc/blockchain.cpp \
rpc/mining.cpp \
rpc/misc.cpp \
rpc/net.cpp \
rpc/rawtransaction.cpp \
rpc/server.cpp \
script/sigcache.cpp \
timedata.cpp \
torcontrol.cpp \
txdb.cpp \
txmempool.cpp \
validationinterface.cpp \
versionbits.cpp \
$(BITCOIN_CORE_H)
if ENABLE_ZMQ
LIBBITCOIN_ZMQ=libbitcoin_zmq.a
libbitcoin_zmq_a_CPPFLAGS = $(BITCOIN_INCLUDES) $(ZMQ_CFLAGS)
libbitcoin_zmq_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_zmq_a_SOURCES = \
zmq/zmqabstractnotifier.cpp \
zmq/zmqnotificationinterface.cpp \
zmq/zmqpublishnotifier.cpp
endif
# wallet: shared between bitcoind and bitcoin-qt, but only linked
# when wallet enabled
libbitcoin_wallet_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_wallet_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_wallet_a_SOURCES = \
wallet/crypter.cpp \
wallet/db.cpp \
wallet/rpcdump.cpp \
wallet/rpcwallet.cpp \
wallet/wallet.cpp \
wallet/wallet_ismine.cpp \
wallet/walletdb.cpp \
policy/rbf.cpp \
$(BITCOIN_CORE_H)
# crypto primitives library
crypto_libbitcoin_crypto_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_CONFIG_INCLUDES)
crypto_libbitcoin_crypto_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crypto_libbitcoin_crypto_a_SOURCES = \
crypto/common.h \
crypto/hmac_sha256.cpp \
crypto/hmac_sha256.h \
crypto/hmac_sha512.cpp \
crypto/hmac_sha512.h \
crypto/ripemd160.cpp \
crypto/ripemd160.h \
crypto/sha1.cpp \
crypto/sha1.h \
crypto/sha256.cpp \
crypto/sha256.h \
crypto/sha512.cpp \
crypto/sha512.h
# consensus: shared between all executables that validate any consensus rules.
libbitcoin_consensus_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_consensus_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_consensus_a_SOURCES = \
amount.h \
arith_uint256.cpp \
arith_uint256.h \
consensus/merkle.cpp \
consensus/merkle.h \
consensus/params.h \
consensus/validation.h \
hash.cpp \
hash.h \
prevector.h \
primitives/block.cpp \
primitives/block.h \
primitives/transaction.cpp \
primitives/transaction.h \
pubkey.cpp \
pubkey.h \
script/bitcoinconsensus.cpp \
script/interpreter.cpp \
script/interpreter.h \
script/script.cpp \
script/script.h \
script/script_error.cpp \
script/script_error.h \
serialize.h \
tinyformat.h \
uint256.cpp \
uint256.h \
utilstrencodings.cpp \
utilstrencodings.h \
version.h
# common: shared between bitcoind, and bitcoin-qt and non-server tools
libbitcoin_common_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_common_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_common_a_SOURCES = \
amount.cpp \
base58.cpp \
chainparams.cpp \
coins.cpp \
compressor.cpp \
core_read.cpp \
core_write.cpp \
key.cpp \
keystore.cpp \
netbase.cpp \
protocol.cpp \
scheduler.cpp \
script/sign.cpp \
script/standard.cpp \
$(BITCOIN_CORE_H)
# util: shared between all executables.
# This library *must* be included to make sure that the glibc
# backward-compatibility objects and their sanity checks are linked.
libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_util_a_SOURCES = \
support/pagelocker.cpp \
chainparamsbase.cpp \
clientversion.cpp \
compat/glibc_sanity.cpp \
compat/glibcxx_sanity.cpp \
compat/strnlen.cpp \
random.cpp \
rpc/protocol.cpp \
support/cleanse.cpp \
sync.cpp \
util.cpp \
utilmoneystr.cpp \
utilstrencodings.cpp \
utiltime.cpp \
$(BITCOIN_CORE_H)
if GLIBC_BACK_COMPAT
libbitcoin_util_a_SOURCES += compat/glibc_compat.cpp
endif
# cli: shared between bitcoin-cli and bitcoin-qt
libbitcoin_cli_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_cli_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_cli_a_SOURCES = \
rpc/client.cpp \
$(BITCOIN_CORE_H)
nodist_libbitcoin_util_a_SOURCES = $(srcdir)/obj/build.h
#
# bitcoind binary #
bitcoind_SOURCES = bitcoind.cpp
bitcoind_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
bitcoind_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
bitcoind_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
if TARGET_WINDOWS
bitcoind_SOURCES += bitcoind-res.rc
endif
bitcoind_LDADD = \
$(LIBBITCOIN_SERVER) \
$(LIBBITCOIN_COMMON) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
$(LIBLEVELDB) \
$(LIBMEMENV) \
$(LIBSECP256K1)
if ENABLE_ZMQ
bitcoind_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS)
endif
if ENABLE_WALLET
bitcoind_LDADD += libbitcoin_wallet.a
endif
bitcoind_LDADD += $(BOOST_LIBS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS) $(EVENT_PTHREADS_LIBS) $(EVENT_LIBS)
# bitcoin-cli binary #
bitcoin_cli_SOURCES = bitcoin-cli.cpp
bitcoin_cli_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CFLAGS)
bitcoin_cli_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
bitcoin_cli_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
if TARGET_WINDOWS
bitcoin_cli_SOURCES += bitcoin-cli-res.rc
endif
bitcoin_cli_LDADD = \
$(LIBBITCOIN_CLI) \
$(LIBUNIVALUE) \
$(LIBBITCOIN_UTIL)
bitcoin_cli_LDADD += $(BOOST_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(EVENT_LIBS)
#
# bitcoin-tx binary #
bitcoin_tx_SOURCES = bitcoin-tx.cpp
bitcoin_tx_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
bitcoin_tx_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
bitcoin_tx_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS)
if TARGET_WINDOWS
bitcoin_tx_SOURCES += bitcoin-tx-res.rc
endif
bitcoin_tx_LDADD = \
$(LIBUNIVALUE) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_CONSENSUS) \
$(LIBBITCOIN_CRYPTO) \
$(LIBSECP256K1)
bitcoin_tx_LDADD += $(BOOST_LIBS) $(CRYPTO_LIBS)
#
# bitcoinconsensus library #
if BUILD_BITCOIN_LIBS
include_HEADERS = script/bitcoinconsensus.h
libbitcoinconsensus_la_SOURCES = $(crypto_libbitcoin_crypto_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
if GLIBC_BACK_COMPAT
libbitcoinconsensus_la_SOURCES += compat/glibc_compat.cpp
endif
libbitcoinconsensus_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined $(RELDFLAGS)
libbitcoinconsensus_la_LIBADD = $(LIBSECP256K1)
libbitcoinconsensus_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include -DBUILD_BITCOIN_INTERNAL
libbitcoinconsensus_la_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
endif
#
CLEANFILES = leveldb/libleveldb.a leveldb/libmemenv.a
CLEANFILES += $(EXTRA_LIBRARIES)
CLEANFILES += *.gcda *.gcno
CLEANFILES += compat/*.gcda compat/*.gcno
CLEANFILES += consensus/*.gcda consensus/*.gcno
CLEANFILES += crypto/*.gcda crypto/*.gcno
CLEANFILES += policy/*.gcda policy/*.gcno
CLEANFILES += primitives/*.gcda primitives/*.gcno
CLEANFILES += script/*.gcda script/*.gcno
CLEANFILES += support/*.gcda support/*.gcno
CLEANFILES += univalue/*.gcda univalue/*.gcno
CLEANFILES += wallet/*.gcda wallet/*.gcno
CLEANFILES += wallet/test/*.gcda wallet/test/*.gcno
CLEANFILES += zmq/*.gcda zmq/*.gcno
DISTCLEANFILES = obj/build.h
EXTRA_DIST = leveldb
clean-local:
-$(MAKE) -C leveldb clean
-$(MAKE) -C secp256k1 clean
-$(MAKE) -C univalue clean
-rm -f leveldb/*/*.gcda leveldb/*/*.gcno leveldb/helpers/memenv/*.gcda leveldb/helpers/memenv/*.gcno
-rm -f config.h
.rc.o:
@test -f $(WINDRES)
## FIXME: How to get the appropriate modulename_CPPFLAGS in here?
$(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@
.mm.o:
$(AM_V_CXX) $(OBJCXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CXXFLAGS) $(QT_INCLUDES) $(AM_CXXFLAGS) $(PIE_FLAGS) $(CXXFLAGS) -c -o $@ $<
check-symbols: $(bin_PROGRAMS)
if GLIBC_BACK_COMPAT
@echo "Checking glibc back compat..."
$(AM_V_at) READELF=$(READELF) CPPFILT=$(CPPFILT) $(top_srcdir)/contrib/devtools/symbol-check.py < $(bin_PROGRAMS)
endif
check-security: $(bin_PROGRAMS)
if HARDEN
@echo "Checking binary security..."
$(AM_V_at) READELF=$(READELF) OBJDUMP=$(OBJDUMP) $(top_srcdir)/contrib/devtools/security-check.py < $(bin_PROGRAMS)
endif
%.pb.cc %.pb.h: %.proto
@test -f $(PROTOC)
$(AM_V_GEN) $(PROTOC) --cpp_out=$(@D) --proto_path=$(abspath $(<D) $<)
if ENABLE_TESTS
include Makefile.test.include
endif
if ENABLE_BENCH
include Makefile.bench.include
endif
if ENABLE_QT
include Makefile.qt.include
endif
if ENABLE_QT_TESTS
include Makefile.qttest.include
endif
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 820a144a95..4481349e50 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -1,153 +1,153 @@
TESTS += test/test_bitcoin
bin_PROGRAMS += test/test_bitcoin
TEST_SRCDIR = test
TEST_BINARY=test/test_bitcoin$(EXEEXT)
EXTRA_DIST += \
test/bctest.py \
test/bitcoin-util-test.py \
test/data/bitcoin-util-test.json \
test/data/blanktx.hex \
test/data/tt-delin1-out.hex \
test/data/tt-delout1-out.hex \
test/data/tt-locktime317000-out.hex \
test/data/tx394b54bb.hex \
test/data/txcreate1.hex \
test/data/txcreate2.hex \
test/data/txcreatedata1.hex \
test/data/txcreatedata2.hex \
test/data/txcreatesign.hex
JSON_TEST_FILES = \
test/data/script_valid.json \
test/data/base58_keys_valid.json \
test/data/base58_encode_decode.json \
test/data/base58_keys_invalid.json \
test/data/script_invalid.json \
test/data/tx_invalid.json \
test/data/tx_valid.json \
test/data/sighash.json
-RAW_TEST_FILES = test/data/alertTests.raw
+RAW_TEST_FILES =
GENERATED_TEST_FILES = $(JSON_TEST_FILES:.json=.json.h) $(RAW_TEST_FILES:.raw=.raw.h)
BITCOIN_TESTS =\
test/arith_uint256_tests.cpp \
test/scriptnum10.h \
test/addrman_tests.cpp \
test/alert_tests.cpp \
test/amount_tests.cpp \
test/allocator_tests.cpp \
test/base32_tests.cpp \
test/base58_tests.cpp \
test/base64_tests.cpp \
test/bip32_tests.cpp \
test/bloom_tests.cpp \
test/Checkpoints_tests.cpp \
test/coins_tests.cpp \
test/compress_tests.cpp \
test/crypto_tests.cpp \
test/DoS_tests.cpp \
test/getarg_tests.cpp \
test/hash_tests.cpp \
test/key_tests.cpp \
test/limitedmap_tests.cpp \
test/dbwrapper_tests.cpp \
test/main_tests.cpp \
test/mempool_tests.cpp \
test/merkle_tests.cpp \
test/miner_tests.cpp \
test/multisig_tests.cpp \
test/netbase_tests.cpp \
test/pmt_tests.cpp \
test/policyestimator_tests.cpp \
test/pow_tests.cpp \
test/prevector_tests.cpp \
test/reverselock_tests.cpp \
test/rpc_tests.cpp \
test/sanity_tests.cpp \
test/scheduler_tests.cpp \
test/script_P2SH_tests.cpp \
test/script_tests.cpp \
test/scriptnum_tests.cpp \
test/serialize_tests.cpp \
test/sighash_tests.cpp \
test/sigopcount_tests.cpp \
test/skiplist_tests.cpp \
test/streams_tests.cpp \
test/test_bitcoin.cpp \
test/test_bitcoin.h \
test/testutil.cpp \
test/testutil.h \
test/timedata_tests.cpp \
test/transaction_tests.cpp \
test/txvalidationcache_tests.cpp \
test/versionbits_tests.cpp \
test/uint256_tests.cpp \
test/univalue_tests.cpp \
test/util_tests.cpp
if ENABLE_WALLET
BITCOIN_TESTS += \
test/accounting_tests.cpp \
wallet/test/wallet_tests.cpp \
test/rpc_wallet_tests.cpp
endif
test_test_bitcoin_SOURCES = $(BITCOIN_TESTS) $(JSON_TEST_FILES) $(RAW_TEST_FILES)
test_test_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) -I$(builddir)/test/ $(TESTDEFS)
test_test_bitcoin_LDADD = $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) $(LIBLEVELDB) $(LIBMEMENV) \
$(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
if ENABLE_WALLET
test_test_bitcoin_LDADD += $(LIBBITCOIN_WALLET)
endif
test_test_bitcoin_LDADD += $(LIBBITCOIN_CONSENSUS) $(BDB_LIBS) $(SSL_LIBS) $(CRYPTO_LIBS) $(MINIUPNPC_LIBS)
test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) -static
if ENABLE_ZMQ
test_test_bitcoin_LDADD += $(ZMQ_LIBS)
endif
nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
$(BITCOIN_TESTS): $(GENERATED_TEST_FILES)
CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES)
CLEANFILES += $(CLEAN_BITCOIN_TEST)
bitcoin_test: $(TEST_BINARY)
bitcoin_test_check: $(TEST_BINARY) FORCE
$(MAKE) check-TESTS TESTS=$^
bitcoin_test_clean : FORCE
rm -f $(CLEAN_BITCOIN_TEST) $(test_test_bitcoin_OBJECTS) $(TEST_BINARY)
check-local:
@echo "Running test/bitcoin-util-test.py..."
$(AM_V_at)srcdir=$(srcdir) PYTHONPATH=$(builddir)/test $(srcdir)/test/bitcoin-util-test.py
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check
if EMBEDDED_UNIVALUE
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C univalue check
endif
%.json.h: %.json
@$(MKDIR_P) $(@D)
@echo "namespace json_tests{" > $@
@echo "static unsigned const char $(*F)[] = {" >> $@
@$(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' >> $@
@echo "};};" >> $@
@echo "Generated $@"
%.raw.h: %.raw
@$(MKDIR_P) $(@D)
@echo "namespace alert_tests{" > $@
@echo "static unsigned const char $(*F)[] = {" >> $@
@$(HEXDUMP) -v -e '8/1 "0x%02x, "' -e '"\n"' $< | $(SED) -e 's/0x ,//g' >> $@
@echo "};};" >> $@
@echo "Generated $@"
diff --git a/src/alert.cpp b/src/alert.cpp
deleted file mode 100644
index eb1cd5e7f6..0000000000
--- a/src/alert.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include "alert.h"
-
-#include "clientversion.h"
-#include "net.h"
-#include "pubkey.h"
-#include "timedata.h"
-#include "ui_interface.h"
-#include "util.h"
-#include "utilstrencodings.h"
-
-#include <stdint.h>
-#include <algorithm>
-#include <map>
-
-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/replace.hpp>
-#include <boost/foreach.hpp>
-#include <boost/thread.hpp>
-
-using namespace std;
-
-map<uint256, CAlert> mapAlerts;
-CCriticalSection cs_mapAlerts;
-
-void CUnsignedAlert::SetNull()
-{
- nVersion = 1;
- nRelayUntil = 0;
- nExpiration = 0;
- nID = 0;
- nCancel = 0;
- setCancel.clear();
- nMinVer = 0;
- nMaxVer = 0;
- setSubVer.clear();
- nPriority = 0;
-
- strComment.clear();
- strStatusBar.clear();
- strReserved.clear();
-}
-
-std::string CUnsignedAlert::ToString() const
-{
- std::string strSetCancel;
- BOOST_FOREACH(int n, setCancel)
- strSetCancel += strprintf("%d ", n);
- std::string strSetSubVer;
- BOOST_FOREACH(const std::string& str, setSubVer)
- strSetSubVer += "\"" + str + "\" ";
- return strprintf(
- "CAlert(\n"
- " nVersion = %d\n"
- " nRelayUntil = %d\n"
- " nExpiration = %d\n"
- " nID = %d\n"
- " nCancel = %d\n"
- " setCancel = %s\n"
- " nMinVer = %d\n"
- " nMaxVer = %d\n"
- " setSubVer = %s\n"
- " nPriority = %d\n"
- " strComment = \"%s\"\n"
- " strStatusBar = \"%s\"\n"
- ")\n",
- nVersion,
- nRelayUntil,
- nExpiration,
- nID,
- nCancel,
- strSetCancel,
- nMinVer,
- nMaxVer,
- strSetSubVer,
- nPriority,
- strComment,
- strStatusBar);
-}
-
-void CAlert::SetNull()
-{
- CUnsignedAlert::SetNull();
- vchMsg.clear();
- vchSig.clear();
-}
-
-bool CAlert::IsNull() const
-{
- return (nExpiration == 0);
-}
-
-uint256 CAlert::GetHash() const
-{
- return Hash(this->vchMsg.begin(), this->vchMsg.end());
-}
-
-bool CAlert::IsInEffect() const
-{
- return (GetAdjustedTime() < nExpiration);
-}
-
-bool CAlert::Cancels(const CAlert& alert) const
-{
- if (!IsInEffect())
- return false; // this was a no-op before 31403
- return (alert.nID <= nCancel || setCancel.count(alert.nID));
-}
-
-bool CAlert::AppliesTo(int nVersion, const std::string& strSubVerIn) const
-{
- // TODO: rework for client-version-embedded-in-strSubVer ?
- return (IsInEffect() &&
- nMinVer <= nVersion && nVersion <= nMaxVer &&
- (setSubVer.empty() || setSubVer.count(strSubVerIn)));
-}
-
-bool CAlert::AppliesToMe() const
-{
- return AppliesTo(PROTOCOL_VERSION, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<std::string>()));
-}
-
-bool CAlert::RelayTo(CNode* pnode) const
-{
- if (!IsInEffect())
- return false;
- // don't relay to nodes which haven't sent their version message
- if (pnode->nVersion == 0)
- return false;
- // returns true if wasn't already contained in the set
- if (pnode->setKnown.insert(GetHash()).second)
- {
- if (AppliesTo(pnode->nVersion, pnode->strSubVer) ||
- AppliesToMe() ||
- GetAdjustedTime() < nRelayUntil)
- {
- pnode->PushMessage(NetMsgType::ALERT, *this);
- return true;
- }
- }
- return false;
-}
-
-bool CAlert::CheckSignature(const std::vector<unsigned char>& alertKey) const
-{
- CPubKey key(alertKey);
- if (!key.Verify(Hash(vchMsg.begin(), vchMsg.end()), vchSig))
- return error("CAlert::CheckSignature(): verify signature failed");
-
- // Now unserialize the data
- CDataStream sMsg(vchMsg, SER_NETWORK, PROTOCOL_VERSION);
- sMsg >> *(CUnsignedAlert*)this;
- return true;
-}
-
-CAlert CAlert::getAlertByHash(const uint256 &hash)
-{
- CAlert retval;
- {
- LOCK(cs_mapAlerts);
- map<uint256, CAlert>::iterator mi = mapAlerts.find(hash);
- if(mi != mapAlerts.end())
- retval = mi->second;
- }
- return retval;
-}
-
-bool CAlert::ProcessAlert(const std::vector<unsigned char>& alertKey, bool fThread)
-{
- if (!CheckSignature(alertKey))
- return false;
- if (!IsInEffect())
- return false;
-
- // alert.nID=max is reserved for if the alert key is
- // compromised. It must have a pre-defined message,
- // must never expire, must apply to all versions,
- // and must cancel all previous
- // alerts or it will be ignored (so an attacker can't
- // send an "everything is OK, don't panic" version that
- // cannot be overridden):
- int maxInt = std::numeric_limits<int>::max();
- if (nID == maxInt)
- {
- if (!(
- nExpiration == maxInt &&
- nCancel == (maxInt-1) &&
- nMinVer == 0 &&
- nMaxVer == maxInt &&
- setSubVer.empty() &&
- nPriority == maxInt &&
- strStatusBar == "URGENT: Alert key compromised, upgrade required"
- ))
- return false;
- }
-
- {
- LOCK(cs_mapAlerts);
- // Cancel previous alerts
- for (map<uint256, CAlert>::iterator mi = mapAlerts.begin(); mi != mapAlerts.end();)
- {
- const CAlert& alert = (*mi).second;
- if (Cancels(alert))
- {
- LogPrint("alert", "cancelling alert %d\n", alert.nID);
- uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED);
- mapAlerts.erase(mi++);
- }
- else if (!alert.IsInEffect())
- {
- LogPrint("alert", "expiring alert %d\n", alert.nID);
- uiInterface.NotifyAlertChanged((*mi).first, CT_DELETED);
- mapAlerts.erase(mi++);
- }
- else
- mi++;
- }
-
- // Check if this alert has been cancelled
- BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
- {
- const CAlert& alert = item.second;
- if (alert.Cancels(*this))
- {
- LogPrint("alert", "alert already cancelled by %d\n", alert.nID);
- return false;
- }
- }
-
- // Add to mapAlerts
- mapAlerts.insert(make_pair(GetHash(), *this));
- // Notify UI and -alertnotify if it applies to me
- if(AppliesToMe())
- {
- uiInterface.NotifyAlertChanged(GetHash(), CT_NEW);
- Notify(strStatusBar, fThread);
- }
- }
-
- LogPrint("alert", "accepted alert %d, AppliesToMe()=%d\n", nID, AppliesToMe());
- return true;
-}
-
-void
-CAlert::Notify(const std::string& strMessage, bool fThread)
-{
- std::string strCmd = GetArg("-alertnotify", "");
- if (strCmd.empty()) return;
-
- // Alert text should be plain ascii coming from a trusted source, but to
- // be safe we first strip anything not in safeChars, then add single quotes around
- // the whole string before passing it to the shell:
- std::string singleQuote("'");
- std::string safeStatus = SanitizeString(strMessage);
- safeStatus = singleQuote+safeStatus+singleQuote;
- boost::replace_all(strCmd, "%s", safeStatus);
-
- if (fThread)
- boost::thread t(runCommand, strCmd); // thread runs free
- else
- runCommand(strCmd);
-}
diff --git a/src/alert.h b/src/alert.h
deleted file mode 100644
index 8cb86e338c..0000000000
--- a/src/alert.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2015 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_ALERT_H
-#define BITCOIN_ALERT_H
-
-#include "serialize.h"
-#include "sync.h"
-
-#include <map>
-#include <set>
-#include <stdint.h>
-#include <string>
-
-class CAlert;
-class CNode;
-class uint256;
-
-extern std::map<uint256, CAlert> mapAlerts;
-extern CCriticalSection cs_mapAlerts;
-
-/** Alerts are for notifying old versions if they become too obsolete and
- * need to upgrade. The message is displayed in the status bar.
- * Alert messages are broadcast as a vector of signed data. Unserializing may
- * not read the entire buffer if the alert is for a newer version, but older
- * versions can still relay the original data.
- */
-class CUnsignedAlert
-{
-public:
- int nVersion;
- int64_t nRelayUntil; // when newer nodes stop relaying to newer nodes
- int64_t nExpiration;
- int nID;
- int nCancel;
- std::set<int> setCancel;
- int nMinVer; // lowest version inclusive
- int nMaxVer; // highest version inclusive
- std::set<std::string> setSubVer; // empty matches all
- int nPriority;
-
- // Actions
- std::string strComment;
- std::string strStatusBar;
- std::string strReserved;
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- READWRITE(this->nVersion);
- nVersion = this->nVersion;
- READWRITE(nRelayUntil);
- READWRITE(nExpiration);
- READWRITE(nID);
- READWRITE(nCancel);
- READWRITE(setCancel);
- READWRITE(nMinVer);
- READWRITE(nMaxVer);
- READWRITE(setSubVer);
- READWRITE(nPriority);
-
- READWRITE(LIMITED_STRING(strComment, 65536));
- READWRITE(LIMITED_STRING(strStatusBar, 256));
- READWRITE(LIMITED_STRING(strReserved, 256));
- }
-
- void SetNull();
-
- std::string ToString() const;
-};
-
-/** An alert is a combination of a serialized CUnsignedAlert and a signature. */
-class CAlert : public CUnsignedAlert
-{
-public:
- std::vector<unsigned char> vchMsg;
- std::vector<unsigned char> vchSig;
-
- CAlert()
- {
- SetNull();
- }
-
- ADD_SERIALIZE_METHODS;
-
- template <typename Stream, typename Operation>
- inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
- READWRITE(vchMsg);
- READWRITE(vchSig);
- }
-
- void SetNull();
- bool IsNull() const;
- uint256 GetHash() const;
- bool IsInEffect() const;
- bool Cancels(const CAlert& alert) const;
- bool AppliesTo(int nVersion, const std::string& strSubVerIn) const;
- bool AppliesToMe() const;
- bool RelayTo(CNode* pnode) const;
- bool CheckSignature(const std::vector<unsigned char>& alertKey) const;
- bool ProcessAlert(const std::vector<unsigned char>& alertKey, bool fThread = true); // fThread means run -alertnotify in a free-running thread
- static void Notify(const std::string& strMessage, bool fThread);
-
- /*
- * Get copy of (active) alert object by hash. Returns a null alert if it is not found.
- */
- static CAlert getAlertByHash(const uint256 &hash);
-};
-
-#endif // BITCOIN_ALERT_H
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 35e090a0b3..508c4de167 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -1,304 +1,302 @@
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
#include "consensus/merkle.h"
#include "tinyformat.h"
#include "util.h"
#include "utilstrencodings.h"
#include <assert.h>
#include <boost/assign/list_of.hpp>
#include "chainparamsseeds.h"
static CBlock CreateGenesisBlock(const char* pszTimestamp, const CScript& genesisOutputScript, uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
CMutableTransaction txNew;
txNew.nVersion = 1;
txNew.vin.resize(1);
txNew.vout.resize(1);
txNew.vin[0].scriptSig = CScript() << 486604799 << CScriptNum(4) << std::vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
txNew.vout[0].nValue = genesisReward;
txNew.vout[0].scriptPubKey = genesisOutputScript;
CBlock genesis;
genesis.nTime = nTime;
genesis.nBits = nBits;
genesis.nNonce = nNonce;
genesis.nVersion = nVersion;
genesis.vtx.push_back(txNew);
genesis.hashPrevBlock.SetNull();
genesis.hashMerkleRoot = BlockMerkleRoot(genesis);
return genesis;
}
/**
* Build the genesis block. Note that the output of its generation
* transaction cannot be spent since it did not originally exist in the
* database.
*
* CBlock(hash=000000000019d6, ver=1, hashPrevBlock=00000000000000, hashMerkleRoot=4a5e1e, nTime=1231006505, nBits=1d00ffff, nNonce=2083236893, vtx=1)
* CTransaction(hash=4a5e1e, ver=1, vin.size=1, vout.size=1, nLockTime=0)
* CTxIn(COutPoint(000000, -1), coinbase 04ffff001d0104455468652054696d65732030332f4a616e2f32303039204368616e63656c6c6f72206f6e206272696e6b206f66207365636f6e64206261696c6f757420666f722062616e6b73)
* CTxOut(nValue=50.00000000, scriptPubKey=0x5F1DF16B2B704C8A578D0B)
* vMerkleTree: 4a5e1e
*/
static CBlock CreateGenesisBlock(uint32_t nTime, uint32_t nNonce, uint32_t nBits, int32_t nVersion, const CAmount& genesisReward)
{
const char* pszTimestamp = "The Times 03/Jan/2009 Chancellor on brink of second bailout for banks";
const CScript genesisOutputScript = CScript() << ParseHex("04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") << OP_CHECKSIG;
return CreateGenesisBlock(pszTimestamp, genesisOutputScript, nTime, nNonce, nBits, nVersion, genesisReward);
}
/**
* Main network
*/
/**
* What makes a good checkpoint block?
* + Is surrounded by blocks with reasonable timestamps
* (no blocks before with a timestamp after, none after with
* timestamp before)
* + Contains no strange transactions
*/
class CMainParams : public CChainParams {
public:
CMainParams() {
strNetworkID = "main";
consensus.nSubsidyHalvingInterval = 210000;
consensus.nMajorityEnforceBlockUpgrade = 750;
consensus.nMajorityRejectBlockOutdated = 950;
consensus.nMajorityWindow = 1000;
consensus.BIP34Height = 227931;
consensus.BIP34Hash = uint256S("0x000000000000024b89b42a942fe0d9fea3bb44ab7bd1b19115dd6a759c0808b8");
consensus.powLimit = uint256S("00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
consensus.fPowAllowMinDifficultyBlocks = false;
consensus.fPowNoRetargeting = false;
consensus.nRuleChangeActivationThreshold = 1916; // 95% of 2016
consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28;
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008
/**
* The message start string is designed to be unlikely to occur in normal data.
* The characters are rarely used upper ASCII, not valid as UTF-8, and produce
* a large 32-bit integer with any alignment.
*/
pchMessageStart[0] = 0xf9;
pchMessageStart[1] = 0xbe;
pchMessageStart[2] = 0xb4;
pchMessageStart[3] = 0xd9;
- vAlertPubKey = ParseHex("04fc9702847840aaf195de8442ebecedf5b095cdbb9bc716bda9110971b28a49e0ead8564ff0db22209e0374782c093bb899692d524e9d6a6956e7c5ecbcd68284");
nDefaultPort = 8333;
nPruneAfterHeight = 100000;
genesis = CreateGenesisBlock(1231006505, 2083236893, 0x1d00ffff, 1, 50 * COIN);
consensus.hashGenesisBlock = genesis.GetHash();
assert(consensus.hashGenesisBlock == uint256S("0x000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"));
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
vSeeds.push_back(CDNSSeedData("bitcoin.sipa.be", "seed.bitcoin.sipa.be")); // Pieter Wuille
vSeeds.push_back(CDNSSeedData("bluematt.me", "dnsseed.bluematt.me")); // Matt Corallo
vSeeds.push_back(CDNSSeedData("dashjr.org", "dnsseed.bitcoin.dashjr.org")); // Luke Dashjr
vSeeds.push_back(CDNSSeedData("bitcoinstats.com", "seed.bitcoinstats.com")); // Christian Decker
vSeeds.push_back(CDNSSeedData("xf2.org", "bitseed.xf2.org")); // Jeff Garzik
vSeeds.push_back(CDNSSeedData("bitcoin.jonasschnelli.ch", "seed.bitcoin.jonasschnelli.ch")); // Jonas Schnelli
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,128);
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x88)(0xB2)(0x1E).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x88)(0xAD)(0xE4).convert_to_container<std::vector<unsigned char> >();
vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));
fMiningRequiresPeers = true;
fDefaultConsistencyChecks = false;
fRequireStandard = true;
fMineBlocksOnDemand = false;
fTestnetToBeDeprecatedFieldRPC = false;
checkpointData = (CCheckpointData) {
boost::assign::map_list_of
( 11111, uint256S("0x0000000069e244f73d78e8fd29ba2fd2ed618bd6fa2ee92559f542fdb26e7c1d"))
( 33333, uint256S("0x000000002dd5588a74784eaa7ab0507a18ad16a236e7b1ce69f00d7ddfb5d0a6"))
( 74000, uint256S("0x0000000000573993a3c9e41ce34471c079dcf5f52a0e824a81e7f953b8661a20"))
(105000, uint256S("0x00000000000291ce28027faea320c8d2b054b2e0fe44a773f3eefb151d6bdc97"))
(134444, uint256S("0x00000000000005b12ffd4cd315cd34ffd4a594f430ac814c91184a0d42d2b0fe"))
(168000, uint256S("0x000000000000099e61ea72015e79632f216fe6cb33d7899acb35b75c8303b763"))
(193000, uint256S("0x000000000000059f452a5f7340de6682a977387c17010ff6e6c3bd83ca8b1317"))
(210000, uint256S("0x000000000000048b95347e83192f69cf0366076336c639f9b7228e9ba171342e"))
(216116, uint256S("0x00000000000001b4f4b433e81ee46494af945cf96014816a4e2370f11b23df4e"))
(225430, uint256S("0x00000000000001c108384350f74090433e7fcf79a606b8e797f065b130575932"))
(250000, uint256S("0x000000000000003887df1f29024b06fc2200b55f8af8f35453d7be294df2d214"))
(279000, uint256S("0x0000000000000001ae8c72a0b0c301f67e3afca10e819efa9041e458e9bd7e40"))
(295000, uint256S("0x00000000000000004d9b4ef50f0f9d686fd69db2e03af35a100370c64632a983")),
1397080064, // * UNIX timestamp of last checkpoint block
36544669, // * total number of transactions between genesis and last checkpoint
// (the tx=... number in the SetBestChain debug.log lines)
60000.0 // * estimated number of transactions per day after checkpoint
};
}
};
static CMainParams mainParams;
/**
* Testnet (v3)
*/
class CTestNetParams : public CChainParams {
public:
CTestNetParams() {
strNetworkID = "test";
consensus.nSubsidyHalvingInterval = 210000;
consensus.nMajorityEnforceBlockUpgrade = 51;
consensus.nMajorityRejectBlockOutdated = 75;
consensus.nMajorityWindow = 100;
consensus.BIP34Height = 21111;
consensus.BIP34Hash = uint256S("0x0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8");
consensus.powLimit = uint256S("00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
consensus.fPowAllowMinDifficultyBlocks = true;
consensus.fPowNoRetargeting = false;
consensus.nRuleChangeActivationThreshold = 1512; // 75% for testchains
consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28;
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 1199145601; // January 1, 2008
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 1230767999; // December 31, 2008
pchMessageStart[0] = 0x0b;
pchMessageStart[1] = 0x11;
pchMessageStart[2] = 0x09;
pchMessageStart[3] = 0x07;
- vAlertPubKey = ParseHex("04302390343f91cc401d56d68b123028bf52e5fca1939df127f63c6467cdf9c8e2c14b61104cf817d0b780da337893ecc4aaff1309e536162dabbdb45200ca2b0a");
nDefaultPort = 18333;
nPruneAfterHeight = 1000;
genesis = CreateGenesisBlock(1296688602, 414098458, 0x1d00ffff, 1, 50 * COIN);
consensus.hashGenesisBlock = genesis.GetHash();
assert(consensus.hashGenesisBlock == uint256S("0x000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"));
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
vFixedSeeds.clear();
vSeeds.clear();
vSeeds.push_back(CDNSSeedData("bitcoin.petertodd.org", "testnet-seed.bitcoin.petertodd.org"));
vSeeds.push_back(CDNSSeedData("bluematt.me", "testnet-seed.bluematt.me"));
vSeeds.push_back(CDNSSeedData("bitcoin.schildbach.de", "testnet-seed.bitcoin.schildbach.de"));
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,239);
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test));
fMiningRequiresPeers = true;
fDefaultConsistencyChecks = false;
fRequireStandard = false;
fMineBlocksOnDemand = false;
fTestnetToBeDeprecatedFieldRPC = true;
checkpointData = (CCheckpointData) {
boost::assign::map_list_of
( 546, uint256S("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")),
1337966069,
1488,
300
};
}
};
static CTestNetParams testNetParams;
/**
* Regression test
*/
class CRegTestParams : public CChainParams {
public:
CRegTestParams() {
strNetworkID = "regtest";
consensus.nSubsidyHalvingInterval = 150;
consensus.nMajorityEnforceBlockUpgrade = 750;
consensus.nMajorityRejectBlockOutdated = 950;
consensus.nMajorityWindow = 1000;
consensus.BIP34Height = -1; // BIP34 has not necessarily activated on regtest
consensus.BIP34Hash = uint256();
consensus.powLimit = uint256S("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
consensus.nPowTargetTimespan = 14 * 24 * 60 * 60; // two weeks
consensus.nPowTargetSpacing = 10 * 60;
consensus.fPowAllowMinDifficultyBlocks = true;
consensus.fPowNoRetargeting = true;
consensus.nRuleChangeActivationThreshold = 108; // 75% for testchains
consensus.nMinerConfirmationWindow = 144; // Faster than normal for regtest (144 instead of 2016)
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28;
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = 0;
consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout = 999999999999ULL;
pchMessageStart[0] = 0xfa;
pchMessageStart[1] = 0xbf;
pchMessageStart[2] = 0xb5;
pchMessageStart[3] = 0xda;
nDefaultPort = 18444;
nPruneAfterHeight = 1000;
genesis = CreateGenesisBlock(1296688602, 2, 0x207fffff, 1, 50 * COIN);
consensus.hashGenesisBlock = genesis.GetHash();
assert(consensus.hashGenesisBlock == uint256S("0x0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"));
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
vFixedSeeds.clear(); //! Regtest mode doesn't have any fixed seeds.
vSeeds.clear(); //! Regtest mode doesn't have any DNS seeds.
fMiningRequiresPeers = false;
fDefaultConsistencyChecks = true;
fRequireStandard = false;
fMineBlocksOnDemand = true;
fTestnetToBeDeprecatedFieldRPC = false;
checkpointData = (CCheckpointData){
boost::assign::map_list_of
( 0, uint256S("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206")),
0,
0,
0
};
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1,239);
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
}
};
static CRegTestParams regTestParams;
static CChainParams *pCurrentParams = 0;
const CChainParams &Params() {
assert(pCurrentParams);
return *pCurrentParams;
}
CChainParams& Params(const std::string& chain)
{
if (chain == CBaseChainParams::MAIN)
return mainParams;
else if (chain == CBaseChainParams::TESTNET)
return testNetParams;
else if (chain == CBaseChainParams::REGTEST)
return regTestParams;
else
throw std::runtime_error(strprintf("%s: Unknown chain %s.", __func__, chain));
}
void SelectParams(const std::string& network)
{
SelectBaseParams(network);
pCurrentParams = &Params(network);
}
diff --git a/src/chainparams.h b/src/chainparams.h
index 88bc666765..59202f548a 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -1,117 +1,114 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_CHAINPARAMS_H
#define BITCOIN_CHAINPARAMS_H
#include "chainparamsbase.h"
#include "consensus/params.h"
#include "primitives/block.h"
#include "protocol.h"
#include <vector>
struct CDNSSeedData {
std::string name, host;
CDNSSeedData(const std::string &strName, const std::string &strHost) : name(strName), host(strHost) {}
};
struct SeedSpec6 {
uint8_t addr[16];
uint16_t port;
};
typedef std::map<int, uint256> MapCheckpoints;
struct CCheckpointData {
MapCheckpoints mapCheckpoints;
int64_t nTimeLastCheckpoint;
int64_t nTransactionsLastCheckpoint;
double fTransactionsPerDay;
};
/**
* CChainParams defines various tweakable parameters of a given instance of the
* Bitcoin system. There are three: the main network on which people trade goods
* and services, the public test network which gets reset from time to time and
* a regression test mode which is intended for private networks only. It has
* minimal difficulty to ensure that blocks can be found instantly.
*/
class CChainParams
{
public:
enum Base58Type {
PUBKEY_ADDRESS,
SCRIPT_ADDRESS,
SECRET_KEY,
EXT_PUBLIC_KEY,
EXT_SECRET_KEY,
MAX_BASE58_TYPES
};
const Consensus::Params& GetConsensus() const { return consensus; }
const CMessageHeader::MessageStartChars& MessageStart() const { return pchMessageStart; }
- const std::vector<unsigned char>& AlertKey() const { return vAlertPubKey; }
int GetDefaultPort() const { return nDefaultPort; }
const CBlock& GenesisBlock() const { return genesis; }
/** Make miner wait to have peers to avoid wasting work */
bool MiningRequiresPeers() const { return fMiningRequiresPeers; }
/** Default value for -checkmempool and -checkblockindex argument */
bool DefaultConsistencyChecks() const { return fDefaultConsistencyChecks; }
/** Policy: Filter transactions that do not match well-defined patterns */
bool RequireStandard() const { return fRequireStandard; }
uint64_t PruneAfterHeight() const { return nPruneAfterHeight; }
/** Make miner stop after a block is found. In RPC, don't return until nGenProcLimit blocks are generated */
bool MineBlocksOnDemand() const { return fMineBlocksOnDemand; }
/** In the future use NetworkIDString() for RPC fields */
bool TestnetToBeDeprecatedFieldRPC() const { return fTestnetToBeDeprecatedFieldRPC; }
/** Return the BIP70 network string (main, test or regtest) */
std::string NetworkIDString() const { return strNetworkID; }
const std::vector<CDNSSeedData>& DNSSeeds() const { return vSeeds; }
const std::vector<unsigned char>& Base58Prefix(Base58Type type) const { return base58Prefixes[type]; }
const std::vector<SeedSpec6>& FixedSeeds() const { return vFixedSeeds; }
const CCheckpointData& Checkpoints() const { return checkpointData; }
protected:
CChainParams() {}
Consensus::Params consensus;
CMessageHeader::MessageStartChars pchMessageStart;
- //! Raw pub key bytes for the broadcast alert signing key.
- std::vector<unsigned char> vAlertPubKey;
int nDefaultPort;
uint64_t nPruneAfterHeight;
std::vector<CDNSSeedData> vSeeds;
std::vector<unsigned char> base58Prefixes[MAX_BASE58_TYPES];
std::string strNetworkID;
CBlock genesis;
std::vector<SeedSpec6> vFixedSeeds;
bool fMiningRequiresPeers;
bool fDefaultConsistencyChecks;
bool fRequireStandard;
bool fMineBlocksOnDemand;
bool fTestnetToBeDeprecatedFieldRPC;
CCheckpointData checkpointData;
};
/**
* Return the currently selected parameters. This won't change after app
* startup, except for unit tests.
*/
const CChainParams &Params();
/**
* @returns CChainParams for the given BIP70 chain name.
*/
CChainParams& Params(const std::string& chain);
/**
* Sets the params returned by Params() to those for the given BIP70 chain name.
* @throws std::runtime_error when the chain is not supported.
*/
void SelectParams(const std::string& chain);
#endif // BITCOIN_CHAINPARAMS_H
diff --git a/src/init.cpp b/src/init.cpp
index fa97521a23..f53d7d3c3c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,1522 +1,1519 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif
#include "init.h"
#include "addrman.h"
#include "amount.h"
#include "chain.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "compat/sanity.h"
#include "consensus/validation.h"
#include "httpserver.h"
#include "httprpc.h"
#include "key.h"
#include "main.h"
#include "miner.h"
#include "net.h"
#include "policy/policy.h"
#include "rpc/server.h"
#include "script/standard.h"
#include "script/sigcache.h"
#include "scheduler.h"
#include "txdb.h"
#include "txmempool.h"
#include "torcontrol.h"
#include "ui_interface.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validationinterface.h"
#ifdef ENABLE_WALLET
#include "wallet/db.h"
#include "wallet/wallet.h"
#include "wallet/walletdb.h"
#endif
#include <stdint.h>
#include <stdio.h>
#ifndef WIN32
#include <signal.h>
#endif
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/bind.hpp>
#include <boost/filesystem.hpp>
#include <boost/function.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <boost/thread.hpp>
#include <openssl/crypto.h>
#if ENABLE_ZMQ
#include "zmq/zmqnotificationinterface.h"
#endif
using namespace std;
#ifdef ENABLE_WALLET
CWallet* pwalletMain = NULL;
#endif
bool fFeeEstimatesInitialized = false;
static const bool DEFAULT_PROXYRANDOMIZE = true;
static const bool DEFAULT_REST_ENABLE = false;
static const bool DEFAULT_DISABLE_SAFEMODE = false;
static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
#if ENABLE_ZMQ
static CZMQNotificationInterface* pzmqNotificationInterface = NULL;
#endif
#ifdef WIN32
// Win32 LevelDB doesn't use filedescriptors, and the ones used for
// accessing block files don't count towards the fd_set size limit
// anyway.
#define MIN_CORE_FILEDESCRIPTORS 0
#else
#define MIN_CORE_FILEDESCRIPTORS 150
#endif
/** Used to pass flags to the Bind() function */
enum BindFlags {
BF_NONE = 0,
BF_EXPLICIT = (1U << 0),
BF_REPORT_ERROR = (1U << 1),
BF_WHITELIST = (1U << 2),
};
static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
//////////////////////////////////////////////////////////////////////////////
//
// Shutdown
//
//
// Thread management and startup/shutdown:
//
// The network-processing threads are all part of a thread group
// created by AppInit() or the Qt main() function.
//
// A clean exit happens when StartShutdown() or the SIGTERM
// signal handler sets fRequestShutdown, which triggers
// the DetectShutdownThread(), which interrupts the main thread group.
// DetectShutdownThread() then exits, which causes AppInit() to
// continue (it .joins the shutdown thread).
// Shutdown() is then
// called to clean up database connections, and stop other
// threads that should only be stopped after the main network-processing
// threads have exited.
//
// Note that if running -daemon the parent process returns from AppInit2
// before adding any threads to the threadGroup, so .join_all() returns
// immediately and the parent exits from main().
//
// Shutdown for Qt is very similar, only it uses a QTimer to detect
// fRequestShutdown getting set, and then does the normal Qt
// shutdown thing.
//
volatile bool fRequestShutdown = false;
void StartShutdown()
{
fRequestShutdown = true;
}
bool ShutdownRequested()
{
return fRequestShutdown;
}
class CCoinsViewErrorCatcher : public CCoinsViewBacked
{
public:
CCoinsViewErrorCatcher(CCoinsView* view) : CCoinsViewBacked(view) {}
bool GetCoins(const uint256 &txid, CCoins &coins) const {
try {
return CCoinsViewBacked::GetCoins(txid, coins);
} catch(const std::runtime_error& e) {
uiInterface.ThreadSafeMessageBox(_("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR);
LogPrintf("Error reading from database: %s\n", e.what());
// Starting the shutdown sequence and returning false to the caller would be
// interpreted as 'entry not found' (as opposed to unable to read data), and
// could lead to invalid interpretation. Just exit immediately, as we can't
// continue anyway, and all writes should be atomic.
abort();
}
}
// Writes do not need similar protection, as failure to write is handled by the caller.
};
static CCoinsViewDB *pcoinsdbview = NULL;
static CCoinsViewErrorCatcher *pcoinscatcher = NULL;
static boost::scoped_ptr<ECCVerifyHandle> globalVerifyHandle;
void Interrupt(boost::thread_group& threadGroup)
{
InterruptHTTPServer();
InterruptHTTPRPC();
InterruptRPC();
InterruptREST();
InterruptTorControl();
threadGroup.interrupt_all();
}
void Shutdown()
{
LogPrintf("%s: In progress...\n", __func__);
static CCriticalSection cs_Shutdown;
TRY_LOCK(cs_Shutdown, lockShutdown);
if (!lockShutdown)
return;
/// Note: Shutdown() must be able to handle cases in which AppInit2() failed part of the way,
/// for example if the data directory was found to be locked.
/// Be sure that anything that writes files or flushes caches only does this if the respective
/// module was initialized.
RenameThread("bitcoin-shutoff");
mempool.AddTransactionsUpdated(1);
StopHTTPRPC();
StopREST();
StopRPC();
StopHTTPServer();
#ifdef ENABLE_WALLET
if (pwalletMain)
pwalletMain->Flush(false);
#endif
StopNode();
StopTorControl();
UnregisterNodeSignals(GetNodeSignals());
if (fFeeEstimatesInitialized)
{
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_fileout(fopen(est_path.string().c_str(), "wb"), SER_DISK, CLIENT_VERSION);
if (!est_fileout.IsNull())
mempool.WriteFeeEstimates(est_fileout);
else
LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string());
fFeeEstimatesInitialized = false;
}
{
LOCK(cs_main);
if (pcoinsTip != NULL) {
FlushStateToDisk();
}
delete pcoinsTip;
pcoinsTip = NULL;
delete pcoinscatcher;
pcoinscatcher = NULL;
delete pcoinsdbview;
pcoinsdbview = NULL;
delete pblocktree;
pblocktree = NULL;
}
#ifdef ENABLE_WALLET
if (pwalletMain)
pwalletMain->Flush(true);
#endif
#if ENABLE_ZMQ
if (pzmqNotificationInterface) {
UnregisterValidationInterface(pzmqNotificationInterface);
delete pzmqNotificationInterface;
pzmqNotificationInterface = NULL;
}
#endif
#ifndef WIN32
try {
boost::filesystem::remove(GetPidFile());
} catch (const boost::filesystem::filesystem_error& e) {
LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what());
}
#endif
UnregisterAllValidationInterfaces();
#ifdef ENABLE_WALLET
delete pwalletMain;
pwalletMain = NULL;
#endif
globalVerifyHandle.reset();
ECC_Stop();
LogPrintf("%s: done\n", __func__);
}
/**
* Signal handlers are very limited in what they are allowed to do, so:
*/
void HandleSIGTERM(int)
{
fRequestShutdown = true;
}
void HandleSIGHUP(int)
{
fReopenDebugLog = true;
}
bool static InitError(const std::string &str)
{
uiInterface.ThreadSafeMessageBox(str, "", CClientUIInterface::MSG_ERROR);
return false;
}
bool static InitWarning(const std::string &str)
{
uiInterface.ThreadSafeMessageBox(str, "", CClientUIInterface::MSG_WARNING);
return true;
}
bool static Bind(const CService &addr, unsigned int flags) {
if (!(flags & BF_EXPLICIT) && IsLimited(addr))
return false;
std::string strError;
if (!BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) {
if (flags & BF_REPORT_ERROR)
return InitError(strError);
return false;
}
return true;
}
void OnRPCStopped()
{
cvBlockChange.notify_all();
LogPrint("rpc", "RPC stopped.\n");
}
void OnRPCPreCommand(const CRPCCommand& cmd)
{
// Observe safe mode
string strWarning = GetWarnings("rpc");
if (strWarning != "" && !GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE) &&
!cmd.okSafeMode)
throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, string("Safe mode: ") + strWarning);
}
std::string HelpMessage(HelpMessageMode mode)
{
const bool showDebug = GetBoolArg("-help-debug", false);
// When adding new options to the categories, please keep and ensure alphabetical ordering.
// Do not translate _(...) -help-debug options, Many technical terms, and only a very small audience, so is unnecessary stress to translators.
string strUsage = HelpMessageGroup(_("Options:"));
strUsage += HelpMessageOpt("-?", _("Print this help message and exit"));
strUsage += HelpMessageOpt("-version", _("Print version and exit"));
- strUsage += HelpMessageOpt("-alerts", strprintf(_("Receive and display P2P network alerts (default: %u)"), DEFAULT_ALERTS));
strUsage += HelpMessageOpt("-alertnotify=<cmd>", _("Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)"));
strUsage += HelpMessageOpt("-blocknotify=<cmd>", _("Execute command when the best block changes (%s in cmd is replaced by block hash)"));
if (showDebug)
strUsage += HelpMessageOpt("-blocksonly", strprintf(_("Whether to operate in a blocks only mode (default: %u)"), DEFAULT_BLOCKSONLY));
strUsage += HelpMessageOpt("-checkblocks=<n>", strprintf(_("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS));
strUsage += HelpMessageOpt("-checklevel=<n>", strprintf(_("How thorough the block verification of -checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL));
strUsage += HelpMessageOpt("-conf=<file>", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME));
if (mode == HMM_BITCOIND)
{
#ifndef WIN32
strUsage += HelpMessageOpt("-daemon", _("Run in the background as a daemon and accept commands"));
#endif
}
strUsage += HelpMessageOpt("-datadir=<dir>", _("Specify data directory"));
strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
strUsage += HelpMessageOpt("-loadblock=<file>", _("Imports blocks from external blk000??.dat file on startup"));
strUsage += HelpMessageOpt("-maxorphantx=<n>", strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS));
strUsage += HelpMessageOpt("-maxmempool=<n>", strprintf(_("Keep the transaction memory pool below <n> megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE));
strUsage += HelpMessageOpt("-mempoolexpiry=<n>", strprintf(_("Do not keep transactions in the mempool longer than <n> hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY));
strUsage += HelpMessageOpt("-par=<n>", strprintf(_("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)"),
-GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS));
#ifndef WIN32
strUsage += HelpMessageOpt("-pid=<file>", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME));
#endif
strUsage += HelpMessageOpt("-prune=<n>", strprintf(_("Reduce storage requirements by pruning (deleting) old blocks. This mode is incompatible with -txindex and -rescan. "
"Warning: Reverting this setting requires re-downloading the entire blockchain. "
"(default: 0 = disable pruning blocks, >%u = target size in MiB to use for block files)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
strUsage += HelpMessageOpt("-reindex", _("Rebuild block chain index from current blk000??.dat files on startup"));
#ifndef WIN32
strUsage += HelpMessageOpt("-sysperms", _("Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)"));
#endif
strUsage += HelpMessageOpt("-txindex", strprintf(_("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)"), DEFAULT_TXINDEX));
strUsage += HelpMessageGroup(_("Connection options:"));
strUsage += HelpMessageOpt("-addnode=<ip>", _("Add a node to connect to and attempt to keep the connection open"));
strUsage += HelpMessageOpt("-banscore=<n>", strprintf(_("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD));
strUsage += HelpMessageOpt("-bantime=<n>", strprintf(_("Number of seconds to keep misbehaving peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME));
strUsage += HelpMessageOpt("-bind=<addr>", _("Bind to given address and always listen on it. Use [host]:port notation for IPv6"));
strUsage += HelpMessageOpt("-connect=<ip>", _("Connect only to the specified node(s)"));
strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)"));
strUsage += HelpMessageOpt("-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %u)"), DEFAULT_NAME_LOOKUP));
strUsage += HelpMessageOpt("-dnsseed", _("Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)"));
strUsage += HelpMessageOpt("-externalip=<ip>", _("Specify your own public address"));
strUsage += HelpMessageOpt("-forcednsseed", strprintf(_("Always query for peer addresses via DNS lookup (default: %u)"), DEFAULT_FORCEDNSSEED));
strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: 1 if no -proxy or -connect)"));
strUsage += HelpMessageOpt("-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION));
strUsage += HelpMessageOpt("-maxconnections=<n>", strprintf(_("Maintain at most <n> connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS));
strUsage += HelpMessageOpt("-maxreceivebuffer=<n>", strprintf(_("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER));
strUsage += HelpMessageOpt("-maxsendbuffer=<n>", strprintf(_("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER));
strUsage += HelpMessageOpt("-onion=<ip:port>", strprintf(_("Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: %s)"), "-proxy"));
strUsage += HelpMessageOpt("-onlynet=<net>", _("Only connect to nodes in network <net> (ipv4, ipv6 or onion)"));
strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %u)"), DEFAULT_PERMIT_BAREMULTISIG));
strUsage += HelpMessageOpt("-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom filters (default: %u)"), DEFAULT_PEERBLOOMFILTERS));
strUsage += HelpMessageOpt("-port=<port>", strprintf(_("Listen for connections on <port> (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort()));
strUsage += HelpMessageOpt("-proxy=<ip:port>", _("Connect through SOCKS5 proxy"));
strUsage += HelpMessageOpt("-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE));
strUsage += HelpMessageOpt("-seednode=<ip>", _("Connect to a node to retrieve peer addresses, and disconnect"));
strUsage += HelpMessageOpt("-timeout=<n>", strprintf(_("Specify connection timeout in milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT));
strUsage += HelpMessageOpt("-torcontrol=<ip>:<port>", strprintf(_("Tor control port to use if onion listening enabled (default: %s)"), DEFAULT_TOR_CONTROL));
strUsage += HelpMessageOpt("-torpassword=<pass>", _("Tor control port password (default: empty)"));
#ifdef USE_UPNP
#if USE_UPNP
strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port (default: 1 when listening and no -proxy)"));
#else
strUsage += HelpMessageOpt("-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0));
#endif
#endif
strUsage += HelpMessageOpt("-whitebind=<addr>", _("Bind to given address and whitelist peers connecting to it. Use [host]:port notation for IPv6"));
strUsage += HelpMessageOpt("-whitelist=<netmask>", _("Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.") +
" " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway"));
strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
strUsage += HelpMessageOpt("-maxuploadtarget=<n>", strprintf(_("Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET));
#ifdef ENABLE_WALLET
strUsage += CWallet::GetWalletHelpString(showDebug);
#endif
#if ENABLE_ZMQ
strUsage += HelpMessageGroup(_("ZeroMQ notification options:"));
strUsage += HelpMessageOpt("-zmqpubhashblock=<address>", _("Enable publish hash block in <address>"));
strUsage += HelpMessageOpt("-zmqpubhashtx=<address>", _("Enable publish hash transaction in <address>"));
strUsage += HelpMessageOpt("-zmqpubrawblock=<address>", _("Enable publish raw block in <address>"));
strUsage += HelpMessageOpt("-zmqpubrawtx=<address>", _("Enable publish raw transaction in <address>"));
#endif
strUsage += HelpMessageGroup(_("Debugging/Testing options:"));
strUsage += HelpMessageOpt("-uacomment=<cmt>", _("Append comment to the user agent string"));
if (showDebug)
{
strUsage += HelpMessageOpt("-checkblockindex", strprintf("Do a full consistency check for mapBlockIndex, setBlockIndexCandidates, chainActive and mapBlocksUnlinked occasionally. Also sets -checkmempool (default: %u)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks()));
strUsage += HelpMessageOpt("-checkmempool=<n>", strprintf("Run checks every <n> transactions (default: %u)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks()));
strUsage += HelpMessageOpt("-checkpoints", strprintf("Disable expensive verification for known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED));
strUsage += HelpMessageOpt("-disablesafemode", strprintf("Disable safemode, override a real safe mode event (default: %u)", DEFAULT_DISABLE_SAFEMODE));
strUsage += HelpMessageOpt("-testsafemode", strprintf("Force safe mode (default: %u)", DEFAULT_TESTSAFEMODE));
strUsage += HelpMessageOpt("-dropmessagestest=<n>", "Randomly drop 1 of every <n> network messages");
strUsage += HelpMessageOpt("-fuzzmessagestest=<n>", "Randomly fuzz 1 of every <n> network messages");
strUsage += HelpMessageOpt("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT));
strUsage += HelpMessageOpt("-limitancestorcount=<n>", strprintf("Do not accept transactions if number of in-mempool ancestors is <n> or more (default: %u)", DEFAULT_ANCESTOR_LIMIT));
strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
}
string debugCategories = "addrman, alert, bench, coindb, db, lock, rand, rpc, selectcoins, mempool, mempoolrej, net, proxy, prune, http, libevent, tor, zmq"; // Don't translate these and qt below
if (mode == HMM_BITCOIN_QT)
debugCategories += ", qt";
strUsage += HelpMessageOpt("-debug=<category>", strprintf(_("Output debugging information (default: %u, supplying <category> is optional)"), 0) + ". " +
_("If <category> is not supplied or if <category> = 1, output all debugging information.") + _("<category> can be:") + " " + debugCategories + ".");
if (showDebug)
strUsage += HelpMessageOpt("-nodebug", "Turn off debugging messages, same as -debug=0");
strUsage += HelpMessageOpt("-help-debug", _("Show all debugging options (usage: --help -help-debug)"));
strUsage += HelpMessageOpt("-logips", strprintf(_("Include IP addresses in debug output (default: %u)"), DEFAULT_LOGIPS));
strUsage += HelpMessageOpt("-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %u)"), DEFAULT_LOGTIMESTAMPS));
if (showDebug)
{
strUsage += HelpMessageOpt("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS));
strUsage += HelpMessageOpt("-mocktime=<n>", "Replace actual time with <n> seconds since epoch (default: 0)");
strUsage += HelpMessageOpt("-limitfreerelay=<n>", strprintf("Continuously rate-limit free transactions to <n>*1000 bytes per minute (default: %u)", DEFAULT_LIMITFREERELAY));
strUsage += HelpMessageOpt("-relaypriority", strprintf("Require high priority for relaying free or low-fee transactions (default: %u)", DEFAULT_RELAYPRIORITY));
strUsage += HelpMessageOpt("-maxsigcachesize=<n>", strprintf("Limit size of signature cache to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE));
strUsage += HelpMessageOpt("-maxtipage=<n>", strprintf("Maximum tip age in seconds to consider node in initial block download (default: %u)", DEFAULT_MAX_TIP_AGE));
}
strUsage += HelpMessageOpt("-minrelaytxfee=<amt>", strprintf(_("Fees (in %s/kB) smaller than this are considered zero fee for relaying, mining and transaction creation (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE)));
strUsage += HelpMessageOpt("-maxtxfee=<amt>", strprintf(_("Maximum total fees (in %s) to use in a single wallet transaction or raw transaction; setting this too low may abort large transactions (default: %s)"),
CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)));
strUsage += HelpMessageOpt("-printtoconsole", _("Send trace/debug info to console instead of debug.log file"));
if (showDebug)
{
strUsage += HelpMessageOpt("-printpriority", strprintf("Log transaction priority and fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY));
}
strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup (default: 1 when no -debug)"));
AppendParamsHelpMessages(strUsage, showDebug);
strUsage += HelpMessageGroup(_("Node relay options:"));
if (showDebug)
strUsage += HelpMessageOpt("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard()));
strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Minimum bytes per sigop in transactions we relay and mine (default: %u)"), DEFAULT_BYTES_PER_SIGOP));
strUsage += HelpMessageOpt("-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER));
strUsage += HelpMessageOpt("-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we relay and mine (default: %u)"), MAX_OP_RETURN_RELAY));
strUsage += HelpMessageOpt("-mempoolreplacement", strprintf(_("Enable transaction replacement in the memory pool (default: %u)"), DEFAULT_ENABLE_REPLACEMENT));
strUsage += HelpMessageGroup(_("Block creation options:"));
strUsage += HelpMessageOpt("-blockminsize=<n>", strprintf(_("Set minimum block size in bytes (default: %u)"), DEFAULT_BLOCK_MIN_SIZE));
strUsage += HelpMessageOpt("-blockmaxsize=<n>", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE));
strUsage += HelpMessageOpt("-blockprioritysize=<n>", strprintf(_("Set maximum size of high-priority/low-fee transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE));
if (showDebug)
strUsage += HelpMessageOpt("-blockversion=<n>", "Override block version to test forking scenarios");
strUsage += HelpMessageGroup(_("RPC server options:"));
strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands"));
strUsage += HelpMessageOpt("-rest", strprintf(_("Accept public REST requests (default: %u)"), DEFAULT_REST_ENABLE));
strUsage += HelpMessageOpt("-rpcbind=<addr>", _("Bind to given address to listen for JSON-RPC connections. Use [host]:port notation for IPv6. This option can be specified multiple times (default: bind to all interfaces)"));
strUsage += HelpMessageOpt("-rpccookiefile=<loc>", _("Location of the auth cookie (default: data dir)"));
strUsage += HelpMessageOpt("-rpcuser=<user>", _("Username for JSON-RPC connections"));
strUsage += HelpMessageOpt("-rpcpassword=<pw>", _("Password for JSON-RPC connections"));
strUsage += HelpMessageOpt("-rpcauth=<userpw>", _("Username and hashed password for JSON-RPC connections. The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical python script is included in share/rpcuser. This option can be specified multiple times"));
strUsage += HelpMessageOpt("-rpcport=<port>", strprintf(_("Listen for JSON-RPC connections on <port> (default: %u or testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort()));
strUsage += HelpMessageOpt("-rpcallowip=<ip>", _("Allow JSON-RPC connections from specified source. Valid for <ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. 1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). This option can be specified multiple times"));
strUsage += HelpMessageOpt("-rpcthreads=<n>", strprintf(_("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS));
if (showDebug) {
strUsage += HelpMessageOpt("-rpcworkqueue=<n>", strprintf("Set the depth of the work queue to service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE));
strUsage += HelpMessageOpt("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT));
}
return strUsage;
}
std::string LicenseInfo()
{
// todo: remove urls from translations on next change
return CopyrightHolders(strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" +
"\n" +
_("This is experimental software.") + "\n" +
"\n" +
_("Distributed under the MIT software license, see the accompanying file COPYING or <http://www.opensource.org/licenses/mit-license.php>.") + "\n" +
"\n" +
_("This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit <https://www.openssl.org/> and cryptographic software written by Eric Young and UPnP software written by Thomas Bernard.") +
"\n";
}
static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex)
{
if (initialSync || !pBlockIndex)
return;
std::string strCmd = GetArg("-blocknotify", "");
boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
struct CImportingNow
{
CImportingNow() {
assert(fImporting == false);
fImporting = true;
}
~CImportingNow() {
assert(fImporting == true);
fImporting = false;
}
};
// If we're using -prune with -reindex, then delete block files that will be ignored by the
// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
// is missing, do the same here to delete any later block files after a gap. Also delete all
// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
// is in sync with what's actually on disk by the time we start downloading, so that pruning
// works correctly.
void CleanupBlockRevFiles()
{
using namespace boost::filesystem;
map<string, path> mapBlockFiles;
// Glob all blk?????.dat and rev?????.dat files from the blocks directory.
// Remove the rev files immediately and insert the blk file paths into an
// ordered map keyed by block file index.
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
path blocksdir = GetDataDir() / "blocks";
for (directory_iterator it(blocksdir); it != directory_iterator(); it++) {
if (is_regular_file(*it) &&
it->path().filename().string().length() == 12 &&
it->path().filename().string().substr(8,4) == ".dat")
{
if (it->path().filename().string().substr(0,3) == "blk")
mapBlockFiles[it->path().filename().string().substr(3,5)] = it->path();
else if (it->path().filename().string().substr(0,3) == "rev")
remove(it->path());
}
}
// Remove all block files that aren't part of a contiguous set starting at
// zero by walking the ordered map (keys are block file indices) by
// keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
// start removing block files.
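    // Example: if blk00000.dat, blk00001.dat and blk00003.dat survive the glob above,
    // files 0 and 1 match the counter and are kept, while blk00003.dat (after the gap
    // at index 2) and anything following it are removed.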
int nContigCounter = 0;
BOOST_FOREACH(const PAIRTYPE(string, path)& item, mapBlockFiles) {
if (atoi(item.first) == nContigCounter) {
nContigCounter++;
continue;
}
remove(item.second);
}
}
void ThreadImport(std::vector<boost::filesystem::path> vImportFiles)
{
const CChainParams& chainparams = Params();
RenameThread("bitcoin-loadblk");
// -reindex
if (fReindex) {
CImportingNow imp;
int nFile = 0;
while (true) {
CDiskBlockPos pos(nFile, 0);
if (!boost::filesystem::exists(GetBlockPosFilename(pos, "blk")))
break; // No block files left to reindex
FILE *file = OpenBlockFile(pos, true);
if (!file)
break; // This error is logged in OpenBlockFile
LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
LoadExternalBlockFile(chainparams, file, &pos);
nFile++;
}
pblocktree->WriteReindexing(false);
fReindex = false;
LogPrintf("Reindexing finished\n");
// To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
InitBlockIndex(chainparams);
}
// hardcoded $DATADIR/bootstrap.dat
boost::filesystem::path pathBootstrap = GetDataDir() / "bootstrap.dat";
if (boost::filesystem::exists(pathBootstrap)) {
FILE *file = fopen(pathBootstrap.string().c_str(), "rb");
if (file) {
CImportingNow imp;
boost::filesystem::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
LogPrintf("Importing bootstrap.dat...\n");
LoadExternalBlockFile(chainparams, file);
RenameOver(pathBootstrap, pathBootstrapOld);
} else {
LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string());
}
}
// -loadblock=
BOOST_FOREACH(const boost::filesystem::path& path, vImportFiles) {
FILE *file = fopen(path.string().c_str(), "rb");
if (file) {
CImportingNow imp;
LogPrintf("Importing blocks file %s...\n", path.string());
LoadExternalBlockFile(chainparams, file);
} else {
LogPrintf("Warning: Could not open blocks file %s\n", path.string());
}
}
if (GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
LogPrintf("Stopping after block import\n");
StartShutdown();
}
}
/** Sanity checks
* Ensure that Bitcoin is running in a usable environment with all
* necessary library support.
*/
bool InitSanityCheck(void)
{
if(!ECC_InitSanityCheck()) {
InitError("Elliptic curve cryptography sanity check failure. Aborting.");
return false;
}
if (!glibc_sanity_test() || !glibcxx_sanity_test())
return false;
return true;
}
bool AppInitServers(boost::thread_group& threadGroup)
{
RPCServer::OnStopped(&OnRPCStopped);
RPCServer::OnPreCommand(&OnRPCPreCommand);
if (!InitHTTPServer())
return false;
if (!StartRPC())
return false;
if (!StartHTTPRPC())
return false;
if (GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST())
return false;
if (!StartHTTPServer())
return false;
return true;
}
// Parameter interaction based on rules
void InitParameterInteraction()
{
// when specifying an explicit binding address, you want to listen on it
// even when -connect or -proxy is specified
if (mapArgs.count("-bind")) {
if (SoftSetBoolArg("-listen", true))
LogPrintf("%s: parameter interaction: -bind set -> setting -listen=1\n", __func__);
}
if (mapArgs.count("-whitebind")) {
if (SoftSetBoolArg("-listen", true))
LogPrintf("%s: parameter interaction: -whitebind set -> setting -listen=1\n", __func__);
}
if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0) {
// when only connecting to trusted nodes, do not seed via DNS, or listen by default
if (SoftSetBoolArg("-dnsseed", false))
LogPrintf("%s: parameter interaction: -connect set -> setting -dnsseed=0\n", __func__);
if (SoftSetBoolArg("-listen", false))
LogPrintf("%s: parameter interaction: -connect set -> setting -listen=0\n", __func__);
}
if (mapArgs.count("-proxy")) {
// to protect privacy, do not listen by default if a default proxy server is specified
if (SoftSetBoolArg("-listen", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__);
// to protect privacy, do not use UPNP when a proxy is set. The user may still specify -listen=1
// to listen locally, so don't rely on this happening through -listen below.
if (SoftSetBoolArg("-upnp", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__);
// to protect privacy, do not discover addresses by default
if (SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -proxy set -> setting -discover=0\n", __func__);
}
if (!GetBoolArg("-listen", DEFAULT_LISTEN)) {
// do not map ports or try to retrieve public IP when not listening (pointless)
if (SoftSetBoolArg("-upnp", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__);
if (SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__);
if (SoftSetBoolArg("-listenonion", false))
LogPrintf("%s: parameter interaction: -listen=0 -> setting -listenonion=0\n", __func__);
}
if (mapArgs.count("-externalip")) {
// if an explicit public IP is specified, do not try to find others
if (SoftSetBoolArg("-discover", false))
LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
}
if (GetBoolArg("-salvagewallet", false)) {
// Rewrite just private keys: rescan to find transactions
if (SoftSetBoolArg("-rescan", true))
LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting -rescan=1\n", __func__);
}
// -zapwallettx implies a rescan
if (GetBoolArg("-zapwallettxes", false)) {
if (SoftSetBoolArg("-rescan", true))
LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting -rescan=1\n", __func__);
}
// disable walletbroadcast and whitelistrelay in blocksonly mode
if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
if (SoftSetBoolArg("-whitelistrelay", false))
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
#ifdef ENABLE_WALLET
if (SoftSetBoolArg("-walletbroadcast", false))
LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -walletbroadcast=0\n", __func__);
#endif
}
// Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
if (GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
if (SoftSetBoolArg("-whitelistrelay", true))
LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__);
}
}
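// Example of the soft-set semantics used throughout InitParameterInteraction():
// `bitcoind -proxy=1.2.3.4:9050 -listen=1` keeps listening enabled, because
// SoftSetBoolArg() only adjusts options the user has not set explicitly.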
static std::string ResolveErrMsg(const char * const optname, const std::string& strBind)
{
return strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind);
}
static std::string AmountErrMsg(const char * const optname, const std::string& strValue)
{
return strprintf(_("Invalid amount for -%s=<amount>: '%s'"), optname, strValue);
}
void InitLogging()
{
fPrintToConsole = GetBoolArg("-printtoconsole", false);
fLogTimestamps = GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
fLogTimeMicros = GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
fLogIPs = GetBoolArg("-logips", DEFAULT_LOGIPS);
LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
LogPrintf("Bitcoin version %s (%s)\n", FormatFullVersion(), CLIENT_DATE);
}
/** Initialize bitcoin.
* @pre Parameters should be parsed and config file should be read.
*/
bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
{
// ********************************************************* Step 1: setup
#ifdef _MSC_VER
// Turn off Microsoft heap dump noise
_CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0));
#endif
#if _MSC_VER >= 1400
// Disable confusing "helpful" text message on abort, Ctrl-C
_set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
#endif
#ifdef WIN32
// Enable Data Execution Prevention (DEP)
// Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008
// A failure is non-critical and needs no further attention!
#ifndef PROCESS_DEP_ENABLE
// We define this here because GCC's winbase.h limits this to _WIN32_WINNT >= 0x0601 (Windows 7),
// which is not correct. Can be removed when GCC's winbase.h is fixed!
#define PROCESS_DEP_ENABLE 0x00000001
#endif
typedef BOOL (WINAPI *PSETPROCDEPPOL)(DWORD);
PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy");
if (setProcDEPPol != NULL) setProcDEPPol(PROCESS_DEP_ENABLE);
#endif
if (!SetupNetworking())
return InitError("Initializing networking failed");
#ifndef WIN32
if (GetBoolArg("-sysperms", false)) {
#ifdef ENABLE_WALLET
if (!GetBoolArg("-disablewallet", false))
return InitError("-sysperms is not allowed in combination with enabled wallet functionality");
#endif
} else {
umask(077);
}
// Clean shutdown on SIGTERM
struct sigaction sa;
sa.sa_handler = HandleSIGTERM;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGTERM, &sa, NULL);
sigaction(SIGINT, &sa, NULL);
// Reopen debug.log on SIGHUP
struct sigaction sa_hup;
sa_hup.sa_handler = HandleSIGHUP;
sigemptyset(&sa_hup.sa_mask);
sa_hup.sa_flags = 0;
sigaction(SIGHUP, &sa_hup, NULL);
// Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly
signal(SIGPIPE, SIG_IGN);
#endif
// ********************************************************* Step 2: parameter interactions
const CChainParams& chainparams = Params();
// also see: InitParameterInteraction()
// if using block pruning, then disable txindex
if (GetArg("-prune", 0)) {
if (GetBoolArg("-txindex", DEFAULT_TXINDEX))
return InitError(_("Prune mode is incompatible with -txindex."));
#ifdef ENABLE_WALLET
if (GetBoolArg("-rescan", false)) {
return InitError(_("Rescans are not possible in pruned mode. You will need to use -reindex which will download the whole blockchain again."));
}
#endif
}
// Make sure enough file descriptors are available
int nBind = std::max((int)mapArgs.count("-bind") + (int)mapArgs.count("-whitebind"), 1);
int nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
nMaxConnections = std::max(nUserMaxConnections, 0);
// Trim requested connection counts, to fit into system limitations
nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS)), 0);
int nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS);
if (nFD < MIN_CORE_FILEDESCRIPTORS)
return InitError(_("Not enough file descriptors available."));
nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS, nMaxConnections);
if (nMaxConnections < nUserMaxConnections)
InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections));
// ********************************************************* Step 3: parameter-to-internal-flags
fDebug = !mapMultiArgs["-debug"].empty();
// Special-case: if -debug=0/-nodebug is set, turn off debugging messages
const vector<string>& categories = mapMultiArgs["-debug"];
if (GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), string("0")) != categories.end())
fDebug = false;
// Check for -debugnet
if (GetBoolArg("-debugnet", false))
InitWarning(_("Unsupported argument -debugnet ignored, use -debug=net."));
// Check for -socks - as this is a privacy risk to continue, exit here
if (mapArgs.count("-socks"))
return InitError(_("Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported."));
// Check for -tor - as this is a privacy risk to continue, exit here
if (GetBoolArg("-tor", false))
return InitError(_("Unsupported argument -tor found, use -onion."));
if (GetBoolArg("-benchmark", false))
InitWarning(_("Unsupported argument -benchmark ignored, use -debug=bench."));
if (GetBoolArg("-whitelistalwaysrelay", false))
InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use -whitelistrelay and/or -whitelistforcerelay."));
// Checkmempool and checkblockindex default to true in regtest mode
int ratio = std::min<int>(std::max<int>(GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000);
if (ratio != 0) {
mempool.setSanityCheck(1.0 / ratio);
}
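    // For example, -checkmempool=100 makes the mempool run its (expensive) internal
    // consistency check on roughly 1 out of every 100 entries added (frequency 1/ratio);
    // -checkmempool=1 checks on every entry, which is the effective default when
    // DefaultConsistencyChecks() is true (regtest).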
fCheckBlockIndex = GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks());
fCheckpointsEnabled = GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
// mempool limits
int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
int64_t nMempoolSizeMin = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40;
if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin)
return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0)));
// -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency
nScriptCheckThreads = GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
if (nScriptCheckThreads <= 0)
nScriptCheckThreads += GetNumCores();
if (nScriptCheckThreads <= 1)
nScriptCheckThreads = 0;
else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS)
nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;
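    // Worked example (core count assumed): on an 8-core machine -par=0 yields 8 verification
    // threads, -par=-2 yields 6 (leave two cores free), and -par=1 collapses to 0, i.e. all
    // script verification runs in the calling thread; the result is capped at MAX_SCRIPTCHECK_THREADS.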
fServer = GetBoolArg("-server", false);
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
int64_t nSignedPruneTarget = GetArg("-prune", 0) * 1024 * 1024;
if (nSignedPruneTarget < 0) {
return InitError(_("Prune cannot be configured with a negative value."));
}
nPruneTarget = (uint64_t) nSignedPruneTarget;
if (nPruneTarget) {
if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
return InitError(strprintf(_("Prune configured below the minimum of %d MiB. Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
}
LogPrintf("Prune configured to target %uMiB on disk for block and undo files.\n", nPruneTarget / 1024 / 1024);
fPruneMode = true;
}
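    // Example: -prune=550 targets 550 MiB of block and undo data on disk; values below
    // MIN_DISK_SPACE_FOR_BLOCK_FILES (reported above in MiB) are rejected, and -prune=0
    // leaves pruning disabled.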
#ifdef ENABLE_WALLET
bool fDisableWallet = GetBoolArg("-disablewallet", false);
if (!fDisableWallet)
walletRegisterRPCCommands();
#endif
nConnectTimeout = GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
if (nConnectTimeout <= 0)
nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
// Fee-per-kilobyte amount considered the same as "free"
// If you are mining, be careful setting this:
// if you set it to zero then
// a transaction spammer can cheaply fill blocks using
// 1-satoshi-fee transactions. It should be set above the real
// cost to you of processing a transaction.
if (mapArgs.count("-minrelaytxfee"))
{
CAmount n = 0;
if (ParseMoney(mapArgs["-minrelaytxfee"], n) && n > 0)
::minRelayTxFee = CFeeRate(n);
else
return InitError(AmountErrMsg("minrelaytxfee", mapArgs["-minrelaytxfee"]));
}
fRequireStandard = !GetBoolArg("-acceptnonstdtxn", !Params().RequireStandard());
if (Params().RequireStandard() && !fRequireStandard)
return InitError(strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString()));
nBytesPerSigOp = GetArg("-bytespersigop", nBytesPerSigOp);
#ifdef ENABLE_WALLET
if (mapArgs.count("-mintxfee"))
{
CAmount n = 0;
if (ParseMoney(mapArgs["-mintxfee"], n) && n > 0)
CWallet::minTxFee = CFeeRate(n);
else
return InitError(AmountErrMsg("mintxfee", mapArgs["-mintxfee"]));
}
if (mapArgs.count("-fallbackfee"))
{
CAmount nFeePerK = 0;
if (!ParseMoney(mapArgs["-fallbackfee"], nFeePerK))
return InitError(strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"), mapArgs["-fallbackfee"]));
if (nFeePerK > HIGH_TX_FEE_PER_KB)
InitWarning(_("-fallbackfee is set very high! This is the transaction fee you may pay when fee estimates are not available."));
CWallet::fallbackFee = CFeeRate(nFeePerK);
}
if (mapArgs.count("-paytxfee"))
{
CAmount nFeePerK = 0;
if (!ParseMoney(mapArgs["-paytxfee"], nFeePerK))
return InitError(AmountErrMsg("paytxfee", mapArgs["-paytxfee"]));
if (nFeePerK > HIGH_TX_FEE_PER_KB)
InitWarning(_("-paytxfee is set very high! This is the transaction fee you will pay if you send a transaction."));
payTxFee = CFeeRate(nFeePerK, 1000);
if (payTxFee < ::minRelayTxFee)
{
return InitError(strprintf(_("Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)"),
mapArgs["-paytxfee"], ::minRelayTxFee.ToString()));
}
}
if (mapArgs.count("-maxtxfee"))
{
CAmount nMaxFee = 0;
if (!ParseMoney(mapArgs["-maxtxfee"], nMaxFee))
return InitError(AmountErrMsg("maxtxfee", mapArgs["-maxtxfee"]));
if (nMaxFee > HIGH_MAX_TX_FEE)
InitWarning(_("-maxtxfee is set very high! Fees this large could be paid on a single transaction."));
maxTxFee = nMaxFee;
if (CFeeRate(maxTxFee, 1000) < ::minRelayTxFee)
{
return InitError(strprintf(_("Invalid amount for -maxtxfee=<amount>: '%s' (must be at least the minrelay fee of %s to prevent stuck transactions)"),
mapArgs["-maxtxfee"], ::minRelayTxFee.ToString()));
}
}
nTxConfirmTarget = GetArg("-txconfirmtarget", DEFAULT_TX_CONFIRM_TARGET);
bSpendZeroConfChange = GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE);
fSendFreeTransactions = GetBoolArg("-sendfreetransactions", DEFAULT_SEND_FREE_TRANSACTIONS);
std::string strWalletFile = GetArg("-wallet", DEFAULT_WALLET_DAT);
#endif // ENABLE_WALLET
fIsBareMultisigStd = GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
fAcceptDatacarrier = GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
nMaxDatacarrierBytes = GetArg("-datacarriersize", nMaxDatacarrierBytes);
- fAlerts = GetBoolArg("-alerts", DEFAULT_ALERTS);
-
// Option to startup with mocktime set (used for regression testing):
SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS))
nLocalServices |= NODE_BLOOM;
nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
fEnableReplacement = GetBoolArg("-mempoolreplacement", DEFAULT_ENABLE_REPLACEMENT);
if ((!fEnableReplacement) && mapArgs.count("-mempoolreplacement")) {
// Minimal effort at forwards compatibility
std::string strReplacementModeList = GetArg("-mempoolreplacement", ""); // default is impossible
std::vector<std::string> vstrReplacementModes;
boost::split(vstrReplacementModes, strReplacementModeList, boost::is_any_of(","));
fEnableReplacement = (std::find(vstrReplacementModes.begin(), vstrReplacementModes.end(), "fee") != vstrReplacementModes.end());
}
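    // Forwards-compatibility example: a hypothetical future setting such as
    // -mempoolreplacement=fee,optin still enables replacement here, because the
    // comma-separated list is only scanned for the known "fee" mode.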
// ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log
// Initialize elliptic curve code
ECC_Start();
globalVerifyHandle.reset(new ECCVerifyHandle());
// Sanity check
if (!InitSanityCheck())
return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME)));
std::string strDataDir = GetDataDir().string();
#ifdef ENABLE_WALLET
// Wallet file must be a plain filename without a directory
if (strWalletFile != boost::filesystem::basename(strWalletFile) + boost::filesystem::extension(strWalletFile))
return InitError(strprintf(_("Wallet %s resides outside data directory %s"), strWalletFile, strDataDir));
#endif
// Make sure only a single Bitcoin process is using the data directory.
boost::filesystem::path pathLockFile = GetDataDir() / ".lock";
FILE* file = fopen(pathLockFile.string().c_str(), "a"); // empty lock file; created if it doesn't exist.
if (file) fclose(file);
try {
static boost::interprocess::file_lock lock(pathLockFile.string().c_str());
if (!lock.try_lock())
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), strDataDir, _(PACKAGE_NAME)));
} catch(const boost::interprocess::interprocess_exception& e) {
return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what()));
}
#ifndef WIN32
CreatePidFile(GetPidFile(), getpid());
#endif
if (GetBoolArg("-shrinkdebugfile", !fDebug))
ShrinkDebugFile();
if (fPrintToDebugLog)
OpenDebugLog();
#ifdef ENABLE_WALLET
LogPrintf("Using BerkeleyDB version %s\n", DbEnv::version(0, 0, 0));
#endif
if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
LogPrintf("Using data directory %s\n", strDataDir);
LogPrintf("Using config file %s\n", GetConfigFile().string());
LogPrintf("Using at most %i connections (%i file descriptors available)\n", nMaxConnections, nFD);
std::ostringstream strErrors;
LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads);
if (nScriptCheckThreads) {
for (int i=0; i<nScriptCheckThreads-1; i++)
threadGroup.create_thread(&ThreadScriptCheck);
}
// Start the lightweight task scheduler thread
CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler);
threadGroup.create_thread(boost::bind(&TraceThread<CScheduler::Function>, "scheduler", serviceLoop));
/* Start the RPC server already. It will be started in "warmup" mode
     * and will not actually process calls yet (but it will indicate to connecting
     * clients that the server is there and will be ready later). Warmup mode will
* be disabled when initialisation is finished.
*/
if (fServer)
{
uiInterface.InitMessage.connect(SetRPCWarmupStatus);
if (!AppInitServers(threadGroup))
return InitError(_("Unable to start HTTP server. See debug log for details."));
}
int64_t nStart;
// ********************************************************* Step 5: verify wallet database integrity
#ifdef ENABLE_WALLET
if (!fDisableWallet) {
LogPrintf("Using wallet %s\n", strWalletFile);
uiInterface.InitMessage(_("Verifying wallet..."));
std::string warningString;
std::string errorString;
if (!CWallet::Verify(strWalletFile, warningString, errorString))
return false;
if (!warningString.empty())
InitWarning(warningString);
if (!errorString.empty())
return InitError(errorString);
} // (!fDisableWallet)
#endif // ENABLE_WALLET
// ********************************************************* Step 6: network initialization
RegisterNodeSignals(GetNodeSignals());
// sanitize comments per BIP-0014, format user agent and check total size
std::vector<string> uacomments;
BOOST_FOREACH(string cmt, mapMultiArgs["-uacomment"])
{
if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT))
return InitError(strprintf(_("User Agent comment (%s) contains unsafe characters."), cmt));
uacomments.push_back(SanitizeString(cmt, SAFE_CHARS_UA_COMMENT));
}
strSubVersion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
return InitError(strprintf(_("Total length of network version string (%i) exceeds maximum length (%i). Reduce the number or size of uacomments."),
strSubVersion.size(), MAX_SUBVERSION_LENGTH));
}
if (mapArgs.count("-onlynet")) {
std::set<enum Network> nets;
BOOST_FOREACH(const std::string& snet, mapMultiArgs["-onlynet"]) {
enum Network net = ParseNetwork(snet);
if (net == NET_UNROUTABLE)
return InitError(strprintf(_("Unknown network specified in -onlynet: '%s'"), snet));
nets.insert(net);
}
for (int n = 0; n < NET_MAX; n++) {
enum Network net = (enum Network)n;
if (!nets.count(net))
SetLimited(net);
}
}
if (mapArgs.count("-whitelist")) {
BOOST_FOREACH(const std::string& net, mapMultiArgs["-whitelist"]) {
CSubNet subnet(net);
if (!subnet.IsValid())
return InitError(strprintf(_("Invalid netmask specified in -whitelist: '%s'"), net));
CNode::AddWhitelistedRange(subnet);
}
}
bool proxyRandomize = GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
// -proxy sets a proxy for all outgoing network traffic
    // -noproxy (or -proxy=0), as well as the empty string, can be used to not set a proxy; this is the default
std::string proxyArg = GetArg("-proxy", "");
SetLimited(NET_TOR);
if (proxyArg != "" && proxyArg != "0") {
proxyType addrProxy = proxyType(CService(proxyArg, 9050), proxyRandomize);
if (!addrProxy.IsValid())
return InitError(strprintf(_("Invalid -proxy address: '%s'"), proxyArg));
SetProxy(NET_IPV4, addrProxy);
SetProxy(NET_IPV6, addrProxy);
SetProxy(NET_TOR, addrProxy);
SetNameProxy(addrProxy);
SetLimited(NET_TOR, false); // by default, -proxy sets onion as reachable, unless -noonion later
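        // Example: -proxy=127.0.0.1:9050 routes IPv4, IPv6 and .onion traffic plus DNS name
        // lookups through one local SOCKS5 proxy; if no port is given in the argument, the
        // CService constructor above falls back to 9050.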
}
// -onion can be used to set only a proxy for .onion, or override normal proxy for .onion addresses
// -noonion (or -onion=0) disables connecting to .onion entirely
// An empty string is used to not override the onion proxy (in which case it defaults to -proxy set above, or none)
std::string onionArg = GetArg("-onion", "");
if (onionArg != "") {
if (onionArg == "0") { // Handle -noonion/-onion=0
SetLimited(NET_TOR); // set onions as unreachable
} else {
proxyType addrOnion = proxyType(CService(onionArg, 9050), proxyRandomize);
if (!addrOnion.IsValid())
return InitError(strprintf(_("Invalid -onion address: '%s'"), onionArg));
SetProxy(NET_TOR, addrOnion);
SetLimited(NET_TOR, false);
}
}
// see Step 2: parameter interactions for more information about these
fListen = GetBoolArg("-listen", DEFAULT_LISTEN);
fDiscover = GetBoolArg("-discover", true);
fNameLookup = GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
bool fBound = false;
if (fListen) {
if (mapArgs.count("-bind") || mapArgs.count("-whitebind")) {
BOOST_FOREACH(const std::string& strBind, mapMultiArgs["-bind"]) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false))
return InitError(ResolveErrMsg("bind", strBind));
fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR));
}
BOOST_FOREACH(const std::string& strBind, mapMultiArgs["-whitebind"]) {
CService addrBind;
if (!Lookup(strBind.c_str(), addrBind, 0, false))
return InitError(ResolveErrMsg("whitebind", strBind));
if (addrBind.GetPort() == 0)
return InitError(strprintf(_("Need to specify a port with -whitebind: '%s'"), strBind));
fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST));
}
}
else {
struct in_addr inaddr_any;
inaddr_any.s_addr = INADDR_ANY;
fBound |= Bind(CService(in6addr_any, GetListenPort()), BF_NONE);
fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE);
}
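        // Note on the defaults above: with no -bind/-whitebind the node binds the IPv6 and
        // IPv4 any-addresses on GetListenPort(); the IPv4 bind only reports a hard error
        // (BF_REPORT_ERROR) when the IPv6 bind has already failed, so failing one address
        // family alone does not abort startup.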
if (!fBound)
return InitError(_("Failed to listen on any port. Use -listen=0 if you want this."));
}
if (mapArgs.count("-externalip")) {
BOOST_FOREACH(const std::string& strAddr, mapMultiArgs["-externalip"]) {
CService addrLocal(strAddr, GetListenPort(), fNameLookup);
if (!addrLocal.IsValid())
return InitError(ResolveErrMsg("externalip", strAddr));
AddLocal(CService(strAddr, GetListenPort(), fNameLookup), LOCAL_MANUAL);
}
}
BOOST_FOREACH(const std::string& strDest, mapMultiArgs["-seednode"])
AddOneShot(strDest);
#if ENABLE_ZMQ
pzmqNotificationInterface = CZMQNotificationInterface::CreateWithArguments(mapArgs);
if (pzmqNotificationInterface) {
RegisterValidationInterface(pzmqNotificationInterface);
}
#endif
if (mapArgs.count("-maxuploadtarget")) {
CNode::SetMaxOutboundTarget(GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET)*1024*1024);
}
// ********************************************************* Step 7: load block chain
fReindex = GetBoolArg("-reindex", false);
// Upgrading to 0.8; hard-link the old blknnnn.dat files into /blocks/
boost::filesystem::path blocksDir = GetDataDir() / "blocks";
if (!boost::filesystem::exists(blocksDir))
{
boost::filesystem::create_directories(blocksDir);
bool linked = false;
for (unsigned int i = 1; i < 10000; i++) {
boost::filesystem::path source = GetDataDir() / strprintf("blk%04u.dat", i);
if (!boost::filesystem::exists(source)) break;
boost::filesystem::path dest = blocksDir / strprintf("blk%05u.dat", i-1);
try {
boost::filesystem::create_hard_link(source, dest);
LogPrintf("Hardlinked %s -> %s\n", source.string(), dest.string());
linked = true;
} catch (const boost::filesystem::filesystem_error& e) {
// Note: hardlink creation failing is not a disaster, it just means
// blocks will get re-downloaded from peers.
LogPrintf("Error hardlinking blk%04u.dat: %s\n", i, e.what());
break;
}
}
if (linked)
{
fReindex = true;
}
}
// cache size calculations
int64_t nTotalCache = (GetArg("-dbcache", nDefaultDbCache) << 20);
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
    nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbCache
int64_t nBlockTreeDBCache = nTotalCache / 8;
if (nBlockTreeDBCache > (1 << 21) && !GetBoolArg("-txindex", DEFAULT_TXINDEX))
nBlockTreeDBCache = (1 << 21); // block tree db cache shouldn't be larger than 2 MiB
nTotalCache -= nBlockTreeDBCache;
int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // use 25%-50% of the remainder for disk cache
nTotalCache -= nCoinDBCache;
nCoinCacheUsage = nTotalCache; // the rest goes to in-memory cache
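    // Worked example (assuming -dbcache=100 and no -txindex): total cache is 100 MiB;
    // the block tree DB gets min(100/8, 2) = 2 MiB, the coin DB gets
    // min(98/2, 98/4 + 8) = 32.5 MiB, and the remaining ~65.5 MiB becomes the in-memory UTXO cache.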
LogPrintf("Cache configuration:\n");
LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024));
LogPrintf("* Using %.1fMiB for in-memory UTXO set\n", nCoinCacheUsage * (1.0 / 1024 / 1024));
bool fLoaded = false;
while (!fLoaded) {
bool fReset = fReindex;
std::string strLoadError;
uiInterface.InitMessage(_("Loading block index..."));
nStart = GetTimeMillis();
do {
try {
UnloadBlockIndex();
delete pcoinsTip;
delete pcoinsdbview;
delete pcoinscatcher;
delete pblocktree;
pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex);
pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex);
pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview);
pcoinsTip = new CCoinsViewCache(pcoinscatcher);
if (fReindex) {
pblocktree->WriteReindexing(true);
//If we're reindexing in prune mode, wipe away unusable block files and all undo data files
if (fPruneMode)
CleanupBlockRevFiles();
}
if (!LoadBlockIndex()) {
strLoadError = _("Error loading block database");
break;
}
// If the loaded chain has a wrong genesis, bail out immediately
// (we're likely using a testnet datadir, or the other way around).
if (!mapBlockIndex.empty() && mapBlockIndex.count(chainparams.GetConsensus().hashGenesisBlock) == 0)
return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?"));
// Initialize the block index (no-op if non-empty database was already loaded)
if (!InitBlockIndex(chainparams)) {
strLoadError = _("Error initializing block database");
break;
}
// Check for changed -txindex state
if (fTxIndex != GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
strLoadError = _("You need to rebuild the database using -reindex to change -txindex");
break;
}
// Check for changed -prune state. What we are concerned about is a user who has pruned blocks
// in the past, but is now trying to run unpruned.
if (fHavePruned && !fPruneMode) {
strLoadError = _("You need to rebuild the database using -reindex to go back to unpruned mode. This will redownload the entire blockchain");
break;
}
uiInterface.InitMessage(_("Verifying blocks..."));
if (fHavePruned && GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) {
LogPrintf("Prune: pruned datadir may not have more than %d blocks; -checkblocks=%d may fail\n",
MIN_BLOCKS_TO_KEEP, GetArg("-checkblocks", DEFAULT_CHECKBLOCKS));
}
{
LOCK(cs_main);
CBlockIndex* tip = chainActive.Tip();
if (tip && tip->nTime > GetAdjustedTime() + 2 * 60 * 60) {
strLoadError = _("The block database contains a block which appears to be from the future. "
"This may be due to your computer's date and time being set incorrectly. "
"Only rebuild the block database if you are sure that your computer's date and time are correct");
break;
}
}
if (!CVerifyDB().VerifyDB(chainparams, pcoinsdbview, GetArg("-checklevel", DEFAULT_CHECKLEVEL),
GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
strLoadError = _("Corrupted block database detected");
break;
}
} catch (const std::exception& e) {
if (fDebug) LogPrintf("%s\n", e.what());
strLoadError = _("Error opening block database");
break;
}
fLoaded = true;
} while(false);
if (!fLoaded) {
// first suggest a reindex
if (!fReset) {
bool fRet = uiInterface.ThreadSafeMessageBox(
strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"),
"", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT);
if (fRet) {
fReindex = true;
fRequestShutdown = false;
} else {
LogPrintf("Aborted block database rebuild. Exiting.\n");
return false;
}
} else {
return InitError(strLoadError);
}
}
}
// As LoadBlockIndex can take several minutes, it's possible the user
// requested to kill the GUI during the last operation. If so, exit.
// As the program has not fully started yet, Shutdown() is possibly overkill.
if (fRequestShutdown)
{
LogPrintf("Shutdown requested. Exiting.\n");
return false;
}
LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart);
boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
CAutoFile est_filein(fopen(est_path.string().c_str(), "rb"), SER_DISK, CLIENT_VERSION);
// Allowed to fail as this file IS missing on first startup.
if (!est_filein.IsNull())
mempool.ReadFeeEstimates(est_filein);
fFeeEstimatesInitialized = true;
// ********************************************************* Step 8: load wallet
#ifdef ENABLE_WALLET
if (fDisableWallet) {
pwalletMain = NULL;
LogPrintf("Wallet disabled!\n");
} else {
std::string warningString;
std::string errorString;
pwalletMain = CWallet::InitLoadWallet(fDisableWallet, strWalletFile, warningString, errorString);
if (!warningString.empty())
InitWarning(warningString);
if (!errorString.empty())
{
LogPrintf("%s", errorString);
return InitError(errorString);
}
if (!pwalletMain)
return false;
}
#else // ENABLE_WALLET
LogPrintf("No wallet support compiled in!\n");
#endif // !ENABLE_WALLET
// ********************************************************* Step 9: data directory maintenance
// if pruning, unset the service bit and perform the initial blockstore prune
// after any wallet rescanning has taken place.
if (fPruneMode) {
LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
nLocalServices &= ~NODE_NETWORK;
if (!fReindex) {
uiInterface.InitMessage(_("Pruning blockstore..."));
PruneAndFlush();
}
}
// ********************************************************* Step 10: import blocks
if (mapArgs.count("-blocknotify"))
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
uiInterface.InitMessage(_("Activating best chain..."));
// scan for better chains in the block chain database, that are not yet connected in the active best chain
CValidationState state;
if (!ActivateBestChain(state, chainparams))
strErrors << "Failed to connect best block";
std::vector<boost::filesystem::path> vImportFiles;
if (mapArgs.count("-loadblock"))
{
BOOST_FOREACH(const std::string& strFile, mapMultiArgs["-loadblock"])
vImportFiles.push_back(strFile);
}
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
if (chainActive.Tip() == NULL) {
LogPrintf("Waiting for genesis block to be imported...\n");
while (!fRequestShutdown && chainActive.Tip() == NULL)
MilliSleep(10);
}
// ********************************************************* Step 11: start node
if (!CheckDiskSpace())
return false;
if (!strErrors.str().empty())
return InitError(strErrors.str());
RandAddSeedPerfmon();
//// debug print
LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
LogPrintf("nBestHeight = %d\n", chainActive.Height());
#ifdef ENABLE_WALLET
LogPrintf("setKeyPool.size() = %u\n", pwalletMain ? pwalletMain->setKeyPool.size() : 0);
LogPrintf("mapWallet.size() = %u\n", pwalletMain ? pwalletMain->mapWallet.size() : 0);
LogPrintf("mapAddressBook.size() = %u\n", pwalletMain ? pwalletMain->mapAddressBook.size() : 0);
#endif
if (GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION))
StartTorControl(threadGroup, scheduler);
StartNode(threadGroup, scheduler);
// Monitor the chain, and alert if we get blocks much quicker or slower than expected
int64_t nPowTargetSpacing = Params().GetConsensus().nPowTargetSpacing;
CScheduler::Function f = boost::bind(&PartitionCheck, &IsInitialBlockDownload,
boost::ref(cs_main), boost::cref(pindexBestHeader), nPowTargetSpacing);
scheduler.scheduleEvery(f, nPowTargetSpacing);
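    // nPowTargetSpacing is in seconds (600 on the main chain), so the partition check above
    // runs about once per expected block interval.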
// ********************************************************* Step 12: finished
SetRPCWarmupFinished();
uiInterface.InitMessage(_("Done loading"));
#ifdef ENABLE_WALLET
if (pwalletMain) {
// Add wallet transactions that aren't already in a block to mapTransactions
pwalletMain->ReacceptWalletTransactions();
// Run a thread to flush wallet periodically
threadGroup.create_thread(boost::bind(&ThreadFlushWalletDB, boost::ref(pwalletMain->strWalletFile)));
}
#endif
return !fRequestShutdown;
}
diff --git a/src/main.cpp b/src/main.cpp
index 16b26444dc..fc443cfb72 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,5920 +1,5877 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "main.h"
#include "addrman.h"
-#include "alert.h"
#include "arith_uint256.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/consensus.h"
#include "consensus/merkle.h"
#include "consensus/validation.h"
#include "hash.h"
#include "init.h"
#include "merkleblock.h"
#include "net.h"
#include "policy/policy.h"
#include "pow.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
#include "tinyformat.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "undo.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#include "versionbits.h"
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>
using namespace std;
#if defined(NDEBUG)
# error "Bitcoin cannot be compiled without assertions."
#endif
/**
* Global state
*/
CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
unsigned int nBytesPerSigOp = DEFAULT_BYTES_PER_SIGOP;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
-bool fAlerts = DEFAULT_ALERTS;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CTxMemPool mempool(::minRelayTxFee);
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Returns true if there are nRequired or more blocks of minVersion or above
* in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
*/
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
static void CheckBlockIndex(const Consensus::Params& consensusParams);
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
const string strMessageMagic = "Bitcoin Signed Message:\n";
// Internal stuff
namespace {
struct CBlockIndexWorkComparator
{
bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
// First sort by most total work, ...
if (pa->nChainWork > pb->nChainWork) return false;
if (pa->nChainWork < pb->nChainWork) return true;
// ... then by earliest time received, ...
if (pa->nSequenceId < pb->nSequenceId) return false;
if (pa->nSequenceId > pb->nSequenceId) return true;
// Use pointer address as tie breaker (should only happen with blocks
// loaded from disk, as those all have id 0).
if (pa < pb) return false;
if (pa > pb) return true;
// Identical blocks.
return false;
}
};
CBlockIndex *pindexBestInvalid;
/**
* The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
* as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
* missing the data for the block.
*/
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
* block/undo files that should be deleted. Set on startup
* or if we allocate more file space when we're in prune mode
*/
bool fCheckForPruning = false;
/**
* Every received block is assigned a unique and increasing identifier, so we
* know which one to give priority in case of a fork.
*/
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;
/**
* Sources of received blocks, saved to be able to send them reject
* messages or ban them when processing happens afterwards. Protected by
* cs_main.
*/
map<uint256, NodeId> mapBlockSource;
/**
* Filter for transactions that were recently rejected by
* AcceptToMemoryPool. These are not rerequested until the chain tip
* changes, at which point the entire filter is reset. Protected by
* cs_main.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
* filter using minimum-sized (60 byte) transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.3 MB
*/
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
CBlockIndex *pindex; //! Optional.
int64_t nTime; //! Time of "getdata" request in microseconds.
bool fValidatedHeaders; //! Whether this block has validated headers at the time of request.
int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Number of blocks in flight with validated headers. */
int nQueuedValidatedHeaders = 0;
/** Number of preferable block download peers. */
int nPreferredDownload = 0;
/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;
/** Dirty block file entries. */
set<int> setDirtyFileInfo;
} // anon namespace
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
namespace {
struct CBlockReject {
unsigned char chRejectCode;
string strRejectReason;
uint256 hashBlock;
};
/**
* Maintain validation-specific state about nodes, protected by cs_main, instead
* of by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The peer's address
CService address;
//! Whether we have a fully established connection.
bool fCurrentlyConnected;
//! Accumulated misbehaviour score for this peer.
int nMisbehavior;
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan;
//! String name of this peer (debugging/logging purposes).
std::string name;
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
CBlockIndex *pindexLastCommonBlock;
//! The best header we have sent our peer.
CBlockIndex *pindexBestHeaderSent;
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince;
list<QueuedBlock> vBlocksInFlight;
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
bool fPreferredDownload;
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders;
CNodeState() {
fCurrentlyConnected = false;
nMisbehavior = 0;
fShouldBan = false;
pindexBestKnownBlock = NULL;
hashLastUnknownBlock.SetNull();
pindexLastCommonBlock = NULL;
pindexBestHeaderSent = NULL;
fSyncStarted = false;
nStallingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
fPreferHeaders = false;
}
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
// Requires cs_main.
CNodeState *State(NodeId pnode) {
map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
if (it == mapNodeState.end())
return NULL;
return &it->second;
}
int GetHeight()
{
LOCK(cs_main);
return chainActive.Height();
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
nPreferredDownload += state->fPreferredDownload;
}
// Returns the time at which to time out a block request (nTime is the request time, in microseconds)
int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
{
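// The offset is 500000 microseconds * nPowTargetSpacing * (4 + queued), i.e.
// (4 + nValidatedQueuedBefore) * nPowTargetSpacing / 2 seconds; with the 10-minute mainnet
// spacing that works out to roughly 20 minutes plus 5 minutes per validated block already queued.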
return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}
void InitializeNode(NodeId nodeid, const CNode *pnode) {
LOCK(cs_main);
CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
state.name = pnode->addrName;
state.address = pnode->addr;
}
void FinalizeNode(NodeId nodeid) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state->fSyncStarted)
nSyncStarted--;
if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
AddressCurrentlyConnected(state->address);
}
BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight) {
nQueuedValidatedHeaders -= entry.fValidatedHeaders;
mapBlocksInFlight.erase(entry.hash);
}
EraseOrphansFor(nodeid);
nPreferredDownload -= state->fPreferredDownload;
mapNodeState.erase(nodeid);
}
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
bool MarkBlockAsReceived(const uint256& hash) {
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
state->nStallingSince = 0;
mapBlocksInFlight.erase(itInFlight);
return true;
}
return false;
}
// Requires cs_main.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
int64_t nNow = GetTimeMicros();
QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
nQueuedValidatedHeaders += newentry.fValidatedHeaders;
list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
state->nBlocksInFlight++;
state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
CNodeState *state = State(nodeid);
assert(state != NULL);
if (!state->hashLastUnknownBlock.IsNull()) {
BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = itOld->second;
state->hashLastUnknownBlock.SetNull();
}
}
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
CNodeState *state = State(nodeid);
assert(state != NULL);
ProcessBlockAvailability(nodeid);
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = it->second;
} else {
// An unknown block was announced; just assume that the latest one is the best one.
state->hashLastUnknownBlock = hash;
}
}
// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
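// Direct fetching is only allowed when the current tip is within 20 target spacings of the
// adjusted time (about 200 minutes on mainnet), i.e. when we are essentially caught up.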
return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
// Requires cs_main
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
return true;
if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
return true;
return false;
}
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
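// First bring the higher of the two blocks down to the same height, then walk both
// pointers back in lockstep until they meet.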
if (pa->nHeight > pb->nHeight) {
pa = pa->GetAncestor(pb->nHeight);
} else if (pb->nHeight > pa->nHeight) {
pb = pb->GetAncestor(pa->nHeight);
}
while (pa != pb && pa && pb) {
pa = pa->pprev;
pb = pb->pprev;
}
// Eventually all chain branches meet at the genesis block.
assert(pa == pb);
return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
if (count == 0)
return;
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == NULL) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
std::vector<CBlockIndex*> vToFetch;
CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
}
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
if (pindex->nChainTx)
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
nodeStaller = waitingfor;
}
return;
}
vBlocks.push_back(pindex);
if (vBlocks.size() == count) {
return;
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
}
}
}
}
} // anon namespace
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state == NULL)
return false;
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
if (queue.pindex)
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
}
return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.GetHeight.connect(&GetHeight);
nodeSignals.ProcessMessages.connect(&ProcessMessages);
nodeSignals.SendMessages.connect(&SendMessages);
nodeSignals.InitializeNode.connect(&InitializeNode);
nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.GetHeight.disconnect(&GetHeight);
nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
nodeSignals.SendMessages.disconnect(&SendMessages);
nodeSignals.InitializeNode.disconnect(&InitializeNode);
nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
// Find the first block the caller has in the main chain
BOOST_FOREACH(const uint256& hash, locator.vHave) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
{
CBlockIndex* pindex = (*mi).second;
if (chain.Contains(pindex))
return pindex;
}
}
return chain.Genesis();
}
CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big, is
// at most 50 megabytes of orphans:
unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
if (sz > 5000)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
mapOrphanTransactions[hash].tx = tx;
mapOrphanTransactions[hash].fromPeer = peer;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return;
BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
{
map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
itPrev->second.erase(hash);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}
mapOrphanTransactions.erase(it);
}
void EraseOrphansFor(NodeId peer)
{
int nErased = 0;
map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
EraseOrphanTx(maybeErase->second.tx.GetHash());
++nErased;
}
}
if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
if (tx.nLockTime == 0)
return true;
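// nLockTime values below LOCKTIME_THRESHOLD (500000000) are interpreted as block heights;
// values at or above it are interpreted as UNIX timestamps.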
if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
return true;
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
if (!(txin.nSequence == CTxIn::SEQUENCE_FINAL))
return false;
}
return true;
}
bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
// By convention a negative value for flags indicates that the
// current network-enforced consensus rules should be used. In
// a future soft-fork scenario that would mean checking which
// rules would be enforced for the next block and setting the
// appropriate flags. At the present time no soft-forks are
// scheduled, so no flags are set.
flags = std::max(flags, 0);
// CheckFinalTx() uses chainActive.Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
// BIP113 will require that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
// IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
? chainActive.Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
* Calculates the block height and previous block's median time past at
* which the transaction will be considered final in the context of BIP 68.
* Also removes from the vector of input heights any entries which did not
* correspond to sequence locked inputs as they do not affect the calculation.
*/
static std::pair<int, int64_t> CalculateSequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
assert(prevHeights->size() == tx.vin.size());
// Will be set to the equivalent height- and time-based nLockTime
// values that would be necessary to satisfy all relative lock-
// time constraints given our view of block chain history.
// The semantics of nLockTime are the last invalid height/time, so
// use -1 to have the effect of any height or time being valid.
int nMinHeight = -1;
int64_t nMinTime = -1;
// tx.nVersion is signed integer so requires cast to unsigned otherwise
// we would be doing a signed comparison and half the range of nVersion
// wouldn't support BIP 68.
bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2
&& flags & LOCKTIME_VERIFY_SEQUENCE;
// Do not enforce sequence numbers as a relative lock time
// unless we have been instructed to
if (!fEnforceBIP68) {
return std::make_pair(nMinHeight, nMinTime);
}
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
const CTxIn& txin = tx.vin[txinIndex];
// Sequence numbers with the most significant bit set are not
// treated as relative lock-times, nor are they given any
// consensus-enforced meaning at this point.
if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
// The height of this input is not relevant for sequence locks
(*prevHeights)[txinIndex] = 0;
continue;
}
int nCoinHeight = (*prevHeights)[txinIndex];
if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight-1, 0))->GetMedianTimePast();
// NOTE: Subtract 1 to maintain nLockTime semantics
// BIP 68 relative lock times have the semantics of calculating
// the first block or time at which the transaction would be
// valid. When calculating the effective block time or height
// for the entire transaction, we switch to using the
// semantics of nLockTime which is the last invalid block
// time or height. Thus we subtract 1 from the calculated
// time or height.
// Time-based relative lock-times are measured from the
// smallest allowed timestamp of the block containing the
// txout being spent, which is the median time past of the
// block prior.
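// The masked sequence value counts units of 512 seconds (1 << SEQUENCE_LOCKTIME_GRANULARITY,
// with the granularity defined as 9); for example a masked value of 3 corresponds to a
// relative lock of 3 * 512 = 1536 seconds after nCoinTime.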
nMinTime = std::max(nMinTime, nCoinTime + (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) - 1);
} else {
nMinHeight = std::max(nMinHeight, nCoinHeight + (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
}
}
return std::make_pair(nMinHeight, nMinTime);
}
static bool EvaluateSequenceLocks(const CBlockIndex& block, std::pair<int, int64_t> lockPair)
{
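// lockPair holds the last *invalid* height/time (nLockTime semantics), so the locks are
// satisfied only when the block height and the previous block's median time past are both
// strictly greater.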
assert(block.pprev);
int64_t nBlockTime = block.pprev->GetMedianTimePast();
if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
return false;
return true;
}
bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block)
{
return EvaluateSequenceLocks(block, CalculateSequenceLocks(tx, flags, prevHeights, block));
}
bool TestLockPointValidity(const LockPoints* lp)
{
AssertLockHeld(cs_main);
assert(lp);
// If there are relative lock times then the maxInputBlock will be set
// If there are no relative lock times, the LockPoints don't depend on the chain
if (lp->maxInputBlock) {
// Check whether chainActive is an extension of the block at which the LockPoints
// calculation was valid. If not, the LockPoints are no longer valid
if (!chainActive.Contains(lp->maxInputBlock)) {
return false;
}
}
// LockPoints still valid
return true;
}
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
AssertLockHeld(cs_main);
AssertLockHeld(mempool.cs);
CBlockIndex* tip = chainActive.Tip();
CBlockIndex index;
index.pprev = tip;
// CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
// height based locks because when SequenceLocks() is called within
// ConnectBlock(), the height of the block *being*
// evaluated is what is used.
// Thus if we want to know if a transaction can be part of the
// *next* block, we need to use one more than chainActive.Height()
index.nHeight = tip->nHeight + 1;
std::pair<int, int64_t> lockPair;
if (useExistingLockPoints) {
assert(lp);
lockPair.first = lp->height;
lockPair.second = lp->time;
}
else {
// pcoinsTip contains the UTXO set for chainActive.Tip()
CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
std::vector<int> prevheights;
prevheights.resize(tx.vin.size());
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
const CTxIn& txin = tx.vin[txinIndex];
CCoins coins;
if (!viewMemPool.GetCoins(txin.prevout.hash, coins)) {
return error("%s: Missing input", __func__);
}
if (coins.nHeight == MEMPOOL_HEIGHT) {
// Assume all mempool transactions confirm in the next block
prevheights[txinIndex] = tip->nHeight + 1;
} else {
prevheights[txinIndex] = coins.nHeight;
}
}
lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
if (lp) {
lp->height = lockPair.first;
lp->time = lockPair.second;
// Also store the hash of the block with the highest height of
// all the blocks which have sequence locked prevouts.
// This hash needs to still be on the chain
// for these LockPoint calculations to be valid
// Note: It is impossible to correctly calculate a maxInputBlock
// if any of the sequence locked inputs depend on unconfirmed txs,
// except in the special case where the relative lock time/height
// is 0, which is equivalent to no sequence lock. Since we assume
// an input height of tip+1 for mempool txs and test the resulting
// lockPair from CalculateSequenceLocks against tip+1, we know
// EvaluateSequenceLocks will fail if there was a non-zero sequence
// lock on a mempool input, so we can use the return value of
// CheckSequenceLocks to indicate the LockPoints' validity
int maxInputHeight = 0;
BOOST_FOREACH(int height, prevheights) {
// Can ignore mempool inputs since we'll fail if they had non-zero locks
if (height != tip->nHeight+1) {
maxInputHeight = std::max(maxInputHeight, height);
}
}
lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
}
}
return EvaluateSequenceLocks(index, lockPair);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
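// GetSigOpCount(false) uses the legacy, "inaccurate" counting in which every CHECKMULTISIG
// operation is counted as 20 sigops regardless of how many pubkeys it actually uses.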
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
BOOST_FOREACH(const CTxOut& txout, tx.vout)
{
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
if (tx.IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
}
return nSigOps;
}
bool CheckTransaction(const CTransaction& tx, CValidationState &state)
{
// Basic checks that don't depend on any context
if (tx.vin.empty())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
if (tx.vout.empty())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
// Size limits
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
// Check for negative or overflow output values
CAmount nValueOut = 0;
BOOST_FOREACH(const CTxOut& txout, tx.vout)
{
if (txout.nValue < 0)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-negative");
if (txout.nValue > MAX_MONEY)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-vout-toolarge");
nValueOut += txout.nValue;
if (!MoneyRange(nValueOut))
return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
if (vInOutPoints.count(txin.prevout))
return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
vInOutPoints.insert(txin.prevout);
}
if (tx.IsCoinBase())
{
if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
}
else
{
BOOST_FOREACH(const CTxIn& txin, tx.vin)
if (txin.prevout.IsNull())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-prevout-null");
}
return true;
}
void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0)
LogPrint("mempool", "Expired %i transactions from the memory pool\n", expired);
std::vector<uint256> vNoSpendsRemaining;
pool.TrimToSize(limit, &vNoSpendsRemaining);
BOOST_FOREACH(const uint256& removed, vNoSpendsRemaining)
pcoinsTip->Uncache(removed);
}
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state)
{
return strprintf("%s%s (code %i)",
state.GetRejectReason(),
state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
state.GetRejectCode());
}
bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree,
bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee,
std::vector<uint256>& vHashTxnToUncache)
{
const uint256 hash = tx.GetHash();
AssertLockHeld(cs_main);
if (pfMissingInputs)
*pfMissingInputs = false;
if (!CheckTransaction(tx, state))
return false; // state filled in by CheckTransaction
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
string reason;
if (fRequireStandard && !IsStandardTx(tx, reason))
return state.DoS(0, false, REJECT_NONSTANDARD, reason);
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
if (pool.exists(hash))
return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");
// Check for conflicts with in-memory transactions
set<uint256> setConflicts;
{
LOCK(pool.cs); // protect pool.mapNextTx
BOOST_FOREACH(const CTxIn &txin, tx.vin)
{
if (pool.mapNextTx.count(txin.prevout))
{
const CTransaction *ptxConflicting = pool.mapNextTx[txin.prevout].ptx;
if (!setConflicts.count(ptxConflicting->GetHash()))
{
// Allow opt-out of transaction replacement by setting
// nSequence >= maxint-1 on all inputs.
//
// maxint-1 is picked to still allow use of nLockTime by
// non-replaceable transactions. Requiring this on all inputs rather
// than just one is for the sake of multi-party protocols, where we
// don't want a single party to be able to disable replacement.
//
// The opt-out ignores descendants as anyone relying on
// first-seen mempool behavior should be checking all
// unconfirmed ancestors anyway; doing otherwise is hopelessly
// insecure.
bool fReplacementOptOut = true;
if (fEnableReplacement)
{
BOOST_FOREACH(const CTxIn &txin, ptxConflicting->vin)
{
if (txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
{
fReplacementOptOut = false;
break;
}
}
}
if (fReplacementOptOut)
return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");
setConflicts.insert(ptxConflicting->GetHash());
}
}
}
}
{
CCoinsView dummy;
CCoinsViewCache view(&dummy);
CAmount nValueIn = 0;
LockPoints lp;
{
LOCK(pool.cs);
CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
view.SetBackend(viewMemPool);
// do we already have it?
bool fHadTxInCache = pcoinsTip->HaveCoinsInCache(hash);
if (view.HaveCoins(hash)) {
if (!fHadTxInCache)
vHashTxnToUncache.push_back(hash);
return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-known");
}
// do all inputs exist?
// Note that this does not check for the presence of actual outputs (see the next check for that),
// and only helps with filling in pfMissingInputs (to determine missing vs spent).
BOOST_FOREACH(const CTxIn txin, tx.vin) {
if (!pcoinsTip->HaveCoinsInCache(txin.prevout.hash))
vHashTxnToUncache.push_back(txin.prevout.hash);
if (!view.HaveCoins(txin.prevout.hash)) {
if (pfMissingInputs)
*pfMissingInputs = true;
return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
}
}
// are the actual inputs available?
if (!view.HaveInputs(tx))
return state.Invalid(false, REJECT_DUPLICATE, "bad-txns-inputs-spent");
// Bring the best block into scope
view.GetBestBlock();
nValueIn = view.GetValueIn(tx);
// we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
view.SetBackend(dummy);
// Only accept BIP68 sequence locked transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a
// CoinsViewCache instead of create its own
if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
}
// Check for non-standard pay-to-script-hash in inputs
if (fRequireStandard && !AreInputsStandard(tx, view))
return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
unsigned int nSigOps = GetLegacySigOpCount(tx);
nSigOps += GetP2SHSigOpCount(tx, view);
CAmount nValueOut = tx.GetValueOut();
CAmount nFees = nValueIn-nValueOut;
// nModifiedFees includes any fee deltas from PrioritiseTransaction
CAmount nModifiedFees = nFees;
double nPriorityDummy = 0;
pool.ApplyDeltas(hash, nPriorityDummy, nModifiedFees);
CAmount inChainInputValue;
double dPriority = view.GetPriority(tx, chainActive.Height(), inChainInputValue);
// Keep track of transactions that spend a coinbase, which we re-scan
// during reorgs to ensure COINBASE_MATURITY is still met.
bool fSpendsCoinbase = false;
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
const CCoins *coins = view.AccessCoins(txin.prevout.hash);
if (coins->IsCoinBase()) {
fSpendsCoinbase = true;
break;
}
}
CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOps, lp);
unsigned int nSize = entry.GetTxSize();
// Check that the transaction doesn't have an excessive number of
// sigops, making it impossible to mine. Since the coinbase transaction
// itself can contain sigops, MAX_STANDARD_TX_SIGOPS is less than
// MAX_BLOCK_SIGOPS; we still consider such a transaction invalid
// rather than merely non-standard.
if ((nSigOps > MAX_STANDARD_TX_SIGOPS) || (nBytesPerSigOp && nSigOps > nSize / nBytesPerSigOp))
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
strprintf("%d", nSigOps));
CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
} else if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) && nModifiedFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
// Require that free transactions have sufficient priority to be mined in the next block.
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
}
// Continuously rate-limit free (really, very-low-fee) transactions
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
// be annoying or make others' transactions take longer to confirm.
if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize))
{
static CCriticalSection csFreeLimiter;
static double dFreeCount;
static int64_t nLastTime;
int64_t nNow = GetTime();
LOCK(csFreeLimiter);
// Use an exponentially decaying ~10-minute window:
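// (1 - 1/600)^t is approximately exp(-t/600) for a t-second gap, so the free-transaction
// counter drains with a ~10-minute time constant; roughly 63% of the accumulated count
// decays away after 600 seconds of inactivity.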
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
if (dFreeCount + nSize >= GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000)
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "rate limited free transaction");
LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
if (nAbsurdFee && nFees > nAbsurdFee)
return state.Invalid(false,
REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
// Calculate in-mempool ancestors, up to a limit.
CTxMemPool::setEntries setAncestors;
size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
}
// A transaction that spends outputs that would be replaced by it is invalid. Now
// that we have the set of all ancestors we can detect this
// pathological case by making sure setConflicts and setAncestors don't
// intersect.
BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
{
const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
if (setConflicts.count(hashAncestor))
{
return state.DoS(10, false,
REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
strprintf("%s spends conflicting transaction %s",
hash.ToString(),
hashAncestor.ToString()));
}
}
// Check if it's economically rational to mine this transaction rather
// than the ones it replaces.
CAmount nConflictingFees = 0;
size_t nConflictingSize = 0;
uint64_t nConflictingCount = 0;
CTxMemPool::setEntries allConflicting;
// If we don't hold the lock allConflicting might be incomplete; the
// subsequent RemoveStaged() and addUnchecked() calls don't guarantee
// mempool consistency for us.
LOCK(pool.cs);
if (setConflicts.size())
{
CFeeRate newFeeRate(nModifiedFees, nSize);
set<uint256> setConflictsParents;
const int maxDescendantsToVisit = 100;
CTxMemPool::setEntries setIterConflicting;
BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
{
CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
if (mi == pool.mapTx.end())
continue;
// Save these to avoid repeated lookups
setIterConflicting.insert(mi);
// Don't allow the replacement to reduce the feerate of the
// mempool.
//
// We usually don't want to accept replacements with lower
// feerates than what they replaced as that would lower the
// feerate of the next block. Requiring that the feerate always
// be increased is also an easy-to-reason about way to prevent
// DoS attacks via replacements.
//
// The mining code doesn't (currently) take children into
// account (CPFP) so we only consider the feerates of
// transactions being directly replaced, not their indirect
// descendants. While that does mean high feerate children are
// ignored when deciding whether or not to replace, we do
// require the replacement to pay more overall fees too,
// mitigating most cases.
CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
if (newFeeRate <= oldFeeRate)
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
newFeeRate.ToString(),
oldFeeRate.ToString()));
}
BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
{
setConflictsParents.insert(txin.prevout.hash);
}
nConflictingCount += mi->GetCountWithDescendants();
}
// This potentially overestimates the number of actual descendants
// but we just want to be conservative to avoid doing too much
// work.
if (nConflictingCount <= maxDescendantsToVisit) {
// If not too many to replace, then calculate the set of
// transactions that would have to be evicted
BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
pool.CalculateDescendants(it, allConflicting);
}
BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
nConflictingFees += it->GetModifiedFee();
nConflictingSize += it->GetTxSize();
}
} else {
return state.DoS(0, false,
REJECT_NONSTANDARD, "too many potential replacements", false,
strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
hash.ToString(),
nConflictingCount,
maxDescendantsToVisit));
}
for (unsigned int j = 0; j < tx.vin.size(); j++)
{
// We don't want to accept replacements that require low
// feerate junk to be mined first. Ideally we'd keep track of
// the ancestor feerates and make the decision based on that,
// but for now requiring all new inputs to be confirmed works.
if (!setConflictsParents.count(tx.vin[j].prevout.hash))
{
// Rather than check the UTXO set - potentially expensive -
// it's cheaper to just check if the new input refers to a
// tx that's in the mempool.
if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
return state.DoS(0, false,
REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
strprintf("replacement %s adds unconfirmed input, idx %d",
hash.ToString(), j));
}
}
// The replacement must pay greater fees than the transactions it
// replaces - otherwise the bandwidth used by those conflicting
// transactions would not be paid for.
if (nModifiedFees < nConflictingFees)
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
// Finally in addition to paying more fees than the conflicts the
// new transaction must pay for its own bandwidth.
CAmount nDeltaFees = nModifiedFees - nConflictingFees;
if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
{
return state.DoS(0, false,
REJECT_INSUFFICIENTFEE, "insufficient fee", false,
strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
hash.ToString(),
FormatMoney(nDeltaFees),
FormatMoney(::minRelayTxFee.GetFee(nSize))));
}
}
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true))
return false; // state filled in by CheckInputs
// Check again against just the consensus-critical mandatory script
// verification flags, in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain
// CHECKSIG NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks, however allowing such transactions into the mempool
// can be exploited as a DoS attack.
if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true))
{
return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
__func__, hash.ToString(), FormatStateMessage(state));
}
// Remove conflicting transactions from the mempool
BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
{
LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
it->GetTx().GetHash().ToString(),
hash.ToString(),
FormatMoney(nModifiedFees - nConflictingFees),
(int)nSize - (int)nConflictingSize);
}
pool.RemoveStaged(allConflicting, false);
// Store transaction in memory
pool.addUnchecked(hash, entry, setAncestors, !IsInitialBlockDownload());
// trim mempool and check if tx was trimmed
if (!fOverrideMempoolLimit) {
LimitMempoolSize(pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
}
SyncWithWallets(tx, NULL, NULL);
return true;
}
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
bool* pfMissingInputs, bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
{
std::vector<uint256> vHashTxToUncache;
bool res = AcceptToMemoryPoolWorker(pool, state, tx, fLimitFree, pfMissingInputs, fOverrideMempoolLimit, nAbsurdFee, vHashTxToUncache);
if (!res) {
BOOST_FOREACH(const uint256& hashTx, vHashTxToUncache)
pcoinsTip->Uncache(hashTx);
}
return res;
}
/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
CBlockIndex *pindexSlow = NULL;
LOCK(cs_main);
if (mempool.lookup(hash, txOut))
{
return true;
}
if (fTxIndex) {
CDiskTxPos postx;
if (pblocktree->ReadTxIndex(hash, postx)) {
CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
if (file.IsNull())
return error("%s: OpenBlockFile failed", __func__);
CBlockHeader header;
try {
file >> header;
fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
hashBlock = header.GetHash();
if (txOut.GetHash() != hash)
return error("%s: txid mismatch", __func__);
return true;
}
}
if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
int nHeight = -1;
{
CCoinsViewCache &view = *pcoinsTip;
const CCoins* coins = view.AccessCoins(hash);
if (coins)
nHeight = coins->nHeight;
}
if (nHeight > 0)
pindexSlow = chainActive[nHeight];
}
if (pindexSlow) {
CBlock block;
if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
if (tx.GetHash() == hash) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
unsigned int nSize = fileout.GetSerializeSize(block);
fileout << FLATDATA(messageStart) << nSize;
// Write block
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("WriteBlockToDisk: ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
return true;
}
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
// Read block
try {
filein >> block;
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
}
// Check the header
if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
return true;
}
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
return false;
if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
pindex->ToString(), pindex->GetBlockPos().ToString());
return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
// Force block reward to zero when right shift is undefined.
if (halvings >= 64)
return 0;
CAmount nSubsidy = 50 * COIN;
// Subsidy is cut in half every 210,000 blocks which will occur approximately every 4 years.
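// For example, at height 420,000 halvings == 2, so the subsidy below becomes (50 * COIN) >> 2, i.e. 12.5 BTC.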
nSubsidy >>= halvings;
return nSubsidy;
}
bool IsInitialBlockDownload()
{
const CChainParams& chainParams = Params();
LOCK(cs_main);
if (fImporting || fReindex)
return true;
if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
return true;
static bool lockIBDState = false;
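// Once we have left initial block download we latch that decision and never report IBD again
// for the lifetime of the process.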
if (lockIBDState)
return false;
bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
pindexBestHeader->GetBlockTime() < GetTime() - nMaxTipAge);
if (!state)
lockIBDState = true;
return state;
}
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
+static void AlertNotify(const std::string& strMessage, bool fThread)
+{
+ uiInterface.NotifyAlertChanged();
+ std::string strCmd = GetArg("-alertnotify", "");
+ if (strCmd.empty()) return;
+
+ // Alert text should be plain ascii coming from a trusted source, but to
+ // be safe we first strip anything not in safeChars, then add single quotes around
+ // the whole string before passing it to the shell:
+ std::string singleQuote("'");
+ std::string safeStatus = SanitizeString(strMessage);
+ safeStatus = singleQuote+safeStatus+singleQuote;
+ boost::replace_all(strCmd, "%s", safeStatus);
+
+ if (fThread)
+ boost::thread t(runCommand, strCmd); // thread runs free
+ else
+ runCommand(strCmd);
+}
+
void CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before the last checkpoint)
if (IsInitialBlockDownload())
return;
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
pindexBestForkTip = NULL;
if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
{
if (!fLargeWorkForkFound && pindexBestForkBase)
{
std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
pindexBestForkBase->phashBlock->ToString() + std::string("'");
- CAlert::Notify(warning, true);
+ AlertNotify(warning, true);
}
if (pindexBestForkTip && pindexBestForkBase)
{
LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
fLargeWorkForkFound = true;
}
else
{
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
fLargeWorkInvalidChainFound = true;
}
}
else
{
fLargeWorkForkFound = false;
fLargeWorkInvalidChainFound = false;
}
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
CBlockIndex* plonger = chainActive.Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
plonger = plonger->pprev;
if (pfork == plonger)
break;
pfork = pfork->pprev;
}
// We define a condition which we should warn the user about as a fork of at least 7 blocks
// with a tip within 72 blocks (+/- 12 hours if no one mines it) of ours
// We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
// hash rate operating on the fork.
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
// We define it this way because it allows us to only store the highest fork tip (+ base) which meets
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
chainActive.Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
}
CheckForkWarningConditions();
}
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
if (howmuch == 0)
return;
CNodeState *state = State(pnode);
if (state == NULL)
return;
state->nMisbehavior += howmuch;
int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
{
LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
state->fShouldBan = true;
} else
LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
}
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
CheckForkWarningConditions();
}
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
if (it != mapBlockSource.end() && State(it->second)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
State(it->second)->rejects.push_back(reject);
if (nDoS > 0)
Misbehaving(it->second, nDoS);
}
}
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
}
}
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight)
{
// mark inputs spent
if (!tx.IsCoinBase()) {
txundo.vprevout.reserve(tx.vin.size());
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
unsigned nPos = txin.prevout.n;
if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
assert(false);
// mark an outpoint spent, and construct undo information
txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
coins->Spend(nPos);
if (coins->vout.size() == 0) {
CTxInUndo& undo = txundo.vprevout.back();
undo.nHeight = coins->nHeight;
undo.fCoinBase = coins->fCoinBase;
undo.nVersion = coins->nVersion;
}
}
}
// add outputs
inputs.ModifyNewCoins(tx.GetHash(), tx.IsCoinBase())->FromTx(tx, nHeight);
}
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight)
{
CTxUndo txundo;
UpdateCoins(tx, state, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) {
return false;
}
return true;
}
int GetSpendHeight(const CCoinsViewCache& inputs)
{
LOCK(cs_main);
CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
{
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!inputs.HaveInputs(tx))
return state.Invalid(false, 0, "", "Inputs unavailable");
CAmount nValueIn = 0;
CAmount nFees = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins *coins = inputs.AccessCoins(prevout.hash);
assert(coins);
// If prev is coinbase, check that it's matured
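// For example, with COINBASE_MATURITY of 100, a coinbase created at height 100000
// first becomes spendable in a block at height 100100 or later.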
if (coins->IsCoinBase()) {
if (nSpendHeight - coins->nHeight < COINBASE_MATURITY)
return state.Invalid(false,
REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
strprintf("tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight));
}
// Check for negative or overflow input values
nValueIn += coins->vout[prevout.n].nValue;
if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputvalues-outofrange");
}
if (nValueIn < tx.GetValueOut())
return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
strprintf("value in (%s) < value out (%s)", FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())));
// Tally transaction fees
CAmount nTxFee = nValueIn - tx.GetValueOut();
if (nTxFee < 0)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
nFees += nTxFee;
if (!MoneyRange(nFees))
return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
return true;
}
}// namespace Consensus
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
return false;
if (pvChecks)
pvChecks->reserve(tx.vin.size());
// Consensus::CheckTxInputs above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
// Helps prevent CPU exhaustion attacks.
// Skip ECDSA signature verification when connecting blocks before the
// last block chain checkpoint. Assuming the checkpoints are valid this
// is safe because block merkle hashes are still computed and checked,
// and any change will be caught at the next checkpoint. Of course, if
// the checkpoint is for a chain that's invalid due to false scriptSigs
// this optimisation would allow an invalid chain to be accepted.
if (fScriptChecks) {
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins* coins = inputs.AccessCoins(prevout.hash);
assert(coins);
// Verify signature
CScriptCheck check(*coins, tx, i, flags, cacheStore);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
} else if (!check()) {
if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
// arguments; if so, don't trigger DoS protection to
// avoid splitting the network between upgraded and
// non-upgraded nodes.
CScriptCheck check2(*coins, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
// Failures of other flags indicate a transaction that is
// invalid in new blocks, e.g. an invalid P2SH. We DoS ban
// such nodes as they are not following the protocol. That
// said, during an upgrade careful thought should be given
// to the correct behavior - we may want to continue
// peering with non-upgraded nodes even after a soft-fork
// super-majority vote has passed.
return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
}
}
return true;
}
namespace {
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("%s: OpenUndoFile failed", __func__);
// Write index header
unsigned int nSize = fileout.GetSerializeSize(blockundo);
fileout << FLATDATA(messageStart) << nSize;
// Write undo data
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("%s: ftell failed", __func__);
pos.nPos = (unsigned int)fileOutPos;
fileout << blockundo;
// calculate & write checksum
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << blockundo;
fileout << hasher.GetHash();
return true;
}
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("%s: OpenBlockFile failed", __func__);
// Read block
uint256 hashChecksum;
try {
filein >> blockundo;
filein >> hashChecksum;
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
// Verify checksum
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << blockundo;
if (hashChecksum != hasher.GetHash())
return error("%s: Checksum mismatch", __func__);
return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
strMiscWarning = strMessage;
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
} // anon namespace
/**
* Apply the undo operation of a CTxInUndo to the given chain state.
* @param undo The undo object.
* @param view The coins view to which to apply the changes.
* @param out The out point that corresponds to the tx input.
* @return True on success.
*/
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
bool fClean = true;
CCoinsModifier coins = view.ModifyCoins(out.hash);
if (undo.nHeight != 0) {
// undo data contains height: this is the last output of the prevout tx being spent
if (!coins->IsPruned())
fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
coins->Clear();
coins->fCoinBase = undo.fCoinBase;
coins->nHeight = undo.nHeight;
coins->nVersion = undo.nVersion;
} else {
if (coins->IsPruned())
fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
}
if (coins->IsAvailable(out.n))
fClean = fClean && error("%s: undo data overwriting existing output", __func__);
if (coins->vout.size() < out.n+1)
coins->vout.resize(out.n+1);
coins->vout[out.n] = undo.txout;
return fClean;
}
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
assert(pindex->GetBlockHash() == view.GetBestBlock());
if (pfClean)
*pfClean = false;
bool fClean = true;
CBlockUndo blockUndo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (pos.IsNull())
return error("DisconnectBlock(): no undo data available");
if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
return error("DisconnectBlock(): failure reading undo data");
if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
return error("DisconnectBlock(): block and undo data inconsistent");
// undo transactions in reverse order
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = block.vtx[i];
uint256 hash = tx.GetHash();
// Check that all outputs are available and match the outputs in the block itself
// exactly.
{
CCoinsModifier outs = view.ModifyCoins(hash);
outs->ClearUnspendable();
CCoins outsBlock(tx, pindex->nHeight);
// The CCoins serialization does not serialize negative numbers.
// No network rules currently depend on the version here, so an inconsistency is harmless
// but it must be corrected before txout nversion ever influences a network rule.
if (outsBlock.nVersion < 0)
outs->nVersion = outsBlock.nVersion;
if (*outs != outsBlock)
fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");
// remove outputs
outs->Clear();
}
// restore inputs
if (i > 0) { // not coinbases
const CTxUndo &txundo = blockUndo.vtxundo[i-1];
if (txundo.vprevout.size() != tx.vin.size())
return error("DisconnectBlock(): transaction and undo data inconsistent");
for (unsigned int j = tx.vin.size(); j-- > 0;) {
const COutPoint &out = tx.vin[j].prevout;
const CTxInUndo &undo = txundo.vprevout[j];
if (!ApplyTxInUndo(undo, view, out))
fClean = false;
}
}
}
// move best block pointer to the previous block
view.SetBestBlock(pindex->pprev->GetBlockHash());
if (pfClean) {
*pfClean = fClean;
return true;
}
return fClean;
}
void static FlushBlockFile(bool fFinalize = false)
{
LOCK(cs_LastBlockFile);
CDiskBlockPos posOld(nLastBlockFile, 0);
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
FileCommit(fileOld);
fclose(fileOld);
}
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
FileCommit(fileOld);
fclose(fileOld);
}
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void ThreadScriptCheck() {
RenameThread("bitcoin-scriptch");
scriptcheckqueue.Thread();
}
//
// Called periodically asynchronously; alerts if it smells like
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
//
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
int64_t nPowTargetSpacing)
{
if (bestHeader == NULL || initialDownloadCheck()) return;
static int64_t lastAlertTime = 0;
int64_t now = GetAdjustedTime();
if (lastAlertTime > now-60*60*24) return; // Alert at most once per day
const int SPAN_HOURS=4;
const int SPAN_SECONDS=SPAN_HOURS*60*60;
int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;
boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);
std::string strWarning;
int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;
LOCK(cs);
const CBlockIndex* i = bestHeader;
int nBlocks = 0;
while (i->GetBlockTime() >= startTime) {
++nBlocks;
i = i->pprev;
if (i == NULL) return; // Ran out of chain, we must not be fully sync'ed
}
// How likely is it to find that many by chance?
double p = boost::math::pdf(poisson, nBlocks);
LogPrint("partitioncheck", "%s: Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
LogPrint("partitioncheck", "%s: likelihood: %g\n", __func__, p);
// Aim for one false-positive about every fifty years of normal running:
const int FIFTY_YEARS = 50*365*24*60*60;
double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);
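// Worked example (assuming the 10-minute main-chain target spacing): BLOCKS_EXPECTED is
// 14400 / 600 = 24 and alertThreshold is 1 / (1576800000 / 14400), roughly 9.1e-6, so a
// 4-hour span containing about 5 or fewer blocks (Poisson pdf ~2.5e-6 at 5) triggers the
// "too slow" warning below, while the expected 24 blocks (pdf ~0.08) never does.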
if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
{
// Many fewer blocks than expected: alert!
strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
}
else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
{
// Many more blocks than expected: alert!
strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
}
if (!strWarning.empty())
{
strMiscWarning = strWarning;
- CAlert::Notify(strWarning, true);
+ AlertNotify(strWarning, true);
lastAlertTime = now;
}
}
// Protected by cs_main
static VersionBitsCache versionbitscache;
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
LOCK(cs_main);
int32_t nVersion = VERSIONBITS_TOP_BITS;
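// Illustration: VERSIONBITS_TOP_BITS is 0x20000000, so if only deployment bit 0 is
// STARTED or LOCKED_IN the computed block version becomes 0x20000001.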
for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
ThresholdState state = VersionBitsState(pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
}
}
return nVersion;
}
/**
* Threshold condition checker that triggers when unknown versionbits are seen on the network.
*/
class WarningBitsConditionChecker : public AbstractThresholdConditionChecker
{
private:
int bit;
public:
WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
int64_t BeginTime(const Consensus::Params& params) const { return 0; }
int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
{
return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
((pindex->nVersion >> bit) & 1) != 0 &&
((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
}
};
// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
{
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
if (!CheckBlock(block, state, !fJustCheck, !fJustCheck))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
// verify that the view's current state corresponds to the previous block
uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
if (!fJustCheck)
view.SetBestBlock(pindex->GetBlockHash());
return true;
}
bool fScriptChecks = true;
if (fCheckpointsEnabled) {
CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
// This block is an ancestor of a checkpoint: disable script checks
fScriptChecks = false;
}
}
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
// two in the chain that violate it. This prevents exploiting the issue against nodes during their
// initial block download.
bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
!((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
// Once BIP34 activated it was no longer possible to create new duplicate coinbases, and thus, other than starting
// from the 2 existing duplicate coinbase pairs, it was not possible to create overwriting txs. But by the
// time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
// before the first had been spent. Since those coinbases are sufficiently buried, it's no longer possible to create further
// duplicate transactions descending from the known pairs either.
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
if (fEnforceBIP30) {
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
const CCoins* coins = view.AccessCoins(tx.GetHash());
if (coins && !coins->IsPruned())
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
}
// BIP16 didn't become active until Apr 1 2012
int64_t nBIP16SwitchTime = 1333238400;
bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
// Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks,
// when 75% of the network has upgraded:
if (block.nVersion >= 3 && IsSuperMajority(3, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
flags |= SCRIPT_VERIFY_DERSIG;
}
// Start enforcing CHECKLOCKTIMEVERIFY, (BIP65) for block.nVersion=4
// blocks, when 75% of the network has upgraded:
if (block.nVersion >= 4 && IsSuperMajority(4, pindex->pprev, chainparams.GetConsensus().nMajorityEnforceBlockUpgrade, chainparams.GetConsensus())) {
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
CBlockUndo blockundo;
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
std::vector<int> prevheights;
int nLockTimeFlags = 0;
CAmount nFees = 0;
int nInputs = 0;
unsigned int nSigOps = 0;
CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
std::vector<std::pair<uint256, CDiskTxPos> > vPos;
vPos.reserve(block.vtx.size());
blockundo.vtxundo.reserve(block.vtx.size() - 1);
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
const CTransaction &tx = block.vtx[i];
nInputs += tx.vin.size();
nSigOps += GetLegacySigOpCount(tx);
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
if (!tx.IsCoinBase())
{
if (!view.HaveInputs(tx))
return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
REJECT_INVALID, "bad-txns-inputs-missingorspent");
// Check that transaction is BIP68 final
// BIP68 lock checks (as opposed to nLockTime checks) must
// be in ConnectBlock because they require the UTXO set
prevheights.resize(tx.vin.size());
for (size_t j = 0; j < tx.vin.size(); j++) {
prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
if (fStrictPayToScriptHash)
{
// Add in sigops done by pay-to-script-hash inputs;
// this is to prevent a "rogue miner" from creating
// an incredibly-expensive-to-validate block.
nSigOps += GetP2SHSigOpCount(tx, view);
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
}
nFees += view.GetValueIn(tx)-tx.GetValueOut();
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, nScriptCheckThreads ? &vChecks : NULL))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
}
CTxUndo undoDummy;
if (i > 0) {
blockundo.vtxundo.push_back(CTxUndo());
}
UpdateCoins(tx, state, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0].GetValueOut() > blockReward)
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0].GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!control.Wait())
return state.DoS(100, false);
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
if (fJustCheck)
return true;
// Write undo information to disk
if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
{
if (pindex->GetUndoPos().IsNull()) {
CDiskBlockPos pos;
if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
return error("ConnectBlock(): FindUndoPos failed");
if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
return AbortNode(state, "Failed to write undo data");
// update nUndoPos in block index
pindex->nUndoPos = pos.nPos;
pindex->nStatus |= BLOCK_HAVE_UNDO;
}
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
setDirtyBlockIndex.insert(pindex);
}
if (fTxIndex)
if (!pblocktree->WriteTxIndex(vPos))
return AbortNode(state, "Failed to write transaction index");
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
// Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase;
GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = block.vtx[0].GetHash();
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
return true;
}
enum FlushStateMode {
FLUSH_STATE_NONE,
FLUSH_STATE_IF_NEEDED,
FLUSH_STATE_PERIODIC,
FLUSH_STATE_ALWAYS
};
/**
* Update the on-disk chain state.
* Depending on the mode we're called with, the caches and indexes are flushed
* if they have grown too large, if it's been a while since the last write,
* or always; in every case they are also flushed if we're in prune mode and are deleting files.
*/
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
const CChainParams& chainparams = Params();
LOCK2(cs_main, cs_LastBlockFile);
static int64_t nLastWrite = 0;
static int64_t nLastFlush = 0;
static int64_t nLastSetChain = 0;
std::set<int> setFilesToPrune;
bool fFlushForPrune = false;
try {
if (fPruneMode && fCheckForPruning && !fReindex) {
FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
fCheckForPruning = false;
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
if (!fHavePruned) {
pblocktree->WriteFlag("prunedblockfiles", true);
fHavePruned = true;
}
}
}
int64_t nNow = GetTimeMicros();
// Avoid writing/flushing immediately after startup.
if (nLastWrite == 0) {
nLastWrite = nNow;
}
if (nLastFlush == 0) {
nLastFlush = nNow;
}
if (nLastSetChain == 0) {
nLastSetChain = nNow;
}
size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
// The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
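// For example, with nCoinCacheUsage set to 300 MiB this becomes true once the cache
// exceeds roughly 270 MiB (90% of the limit).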
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
// The cache is over the limit, we have to write now.
bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
// Combine all conditions that result in a full cache flush.
bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Depend on nMinDiskSpace to ensure we can write block index
if (!CheckDiskSpace(0))
return state.Error("out of disk space");
// First make sure all block and undo data is flushed to disk.
FlushBlockFile();
// Then update all block file information (which may refer to block and undo files).
{
std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
vFiles.reserve(setDirtyFileInfo.size());
for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
setDirtyFileInfo.erase(it++);
}
std::vector<const CBlockIndex*> vBlocks;
vBlocks.reserve(setDirtyBlockIndex.size());
for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
vBlocks.push_back(*it);
setDirtyBlockIndex.erase(it++);
}
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
return AbortNode(state, "Files to write to block index database");
}
}
// Finally remove any pruned files
if (fFlushForPrune)
UnlinkPrunedFiles(setFilesToPrune);
nLastWrite = nNow;
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush) {
// Typical CCoins structures on disk are around 128 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
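// For example, a chainstate cache holding 2 million entries requires roughly
// 128 * 2 * 2 * 2000000 bytes, i.e. about 1 GB of free disk space, before flushing.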
if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
return state.Error("out of disk space");
// Flush the chainstate (which may refer to block index entries).
if (!pcoinsTip->Flush())
return AbortNode(state, "Failed to write to coin database");
nLastFlush = nNow;
}
if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
// Update best block in wallet (so we can detect restored wallets).
GetMainSignals().SetBestChain(chainActive.GetLocator());
nLastSetChain = nNow;
}
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error while flushing: ") + e.what());
}
return true;
}
void FlushStateToDisk() {
CValidationState state;
FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}
void PruneAndFlush() {
CValidationState state;
fCheckForPruning = true;
FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew) {
const CChainParams& chainParams = Params();
chainActive.SetTip(pindexNew);
// New best block
nTimeBestReceived = GetTime();
mempool.AddTransactionsUpdated(1);
LogPrintf("%s: new best=%s height=%d bits=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), chainActive.Tip()->nBits,
log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
cvBlockChange.notify_all();
// Check the version of the last 100 blocks to see if we need to upgrade:
static bool fWarned = false;
if (!IsInitialBlockDownload())
{
int nUpgraded = 0;
const CBlockIndex* pindex = chainActive.Tip();
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
WarningBitsConditionChecker checker(bit);
ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]);
if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
if (state == THRESHOLD_ACTIVE) {
strMiscWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit);
if (!fWarned) {
- CAlert::Notify(strMiscWarning, true);
+ AlertNotify(strMiscWarning, true);
fWarned = true;
}
} else {
LogPrintf("%s: unknown new rules are about to activate (versionbit %i)\n", __func__, bit);
}
}
}
for (int i = 0; i < 100 && pindex != NULL; i++)
{
int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
LogPrintf("%s: %d of last 100 blocks have unexpected version\n", __func__, nUpgraded);
if (nUpgraded > 100/2)
{
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
strMiscWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
if (!fWarned) {
- CAlert::Notify(strMiscWarning, true);
+ AlertNotify(strMiscWarning, true);
fWarned = true;
}
}
}
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
bool static DisconnectTip(CValidationState& state, const Consensus::Params& consensusParams)
{
CBlockIndex *pindexDelete = chainActive.Tip();
assert(pindexDelete);
// Read block from disk.
CBlock block;
if (!ReadBlockFromDisk(block, pindexDelete, consensusParams))
return AbortNode(state, "Failed to read block");
// Apply the block atomically to the chain state.
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
if (!DisconnectBlock(block, state, pindexDelete, view))
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
assert(view.Flush());
}
LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
// Resurrect mempool transactions from the disconnected block.
std::vector<uint256> vHashUpdate;
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
// ignore validation errors in resurrected transactions
list<CTransaction> removed;
CValidationState stateDummy;
if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
mempool.removeRecursive(tx, removed);
} else if (mempool.exists(tx.GetHash())) {
vHashUpdate.push_back(tx.GetHash());
}
}
// AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
// no in-mempool children, which is generally not true when adding
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in this
// block that were added back and cleans up the mempool state.
mempool.UpdateTransactionsFromBlock(vHashUpdate);
// Update chainActive and related variables.
UpdateTip(pindexDelete->pprev);
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
SyncWithWallets(tx, pindexDelete->pprev, NULL);
}
return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
* Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*/
bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock)
{
assert(pindexNew->pprev == chainActive.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
CBlock block;
if (!pblock) {
if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
return AbortNode(state, "Failed to read block");
pblock = &block;
}
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
bool rv = ConnectBlock(*pblock, state, pindexNew, view);
GetMainSignals().BlockChecked(*pblock, state);
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
}
mapBlockSource.erase(pindexNew->GetBlockHash());
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
assert(view.Flush());
}
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
// Remove conflicting transactions from the mempool.
list<CTransaction> txConflicted;
mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
// Update chainActive & related variables.
UpdateTip(pindexNew);
// Tell wallet about transactions that went from mempool
// to conflicted:
BOOST_FOREACH(const CTransaction &tx, txConflicted) {
SyncWithWallets(tx, pindexNew, NULL);
}
// ... and about transactions that got confirmed:
BOOST_FOREACH(const CTransaction &tx, pblock->vtx) {
SyncWithWallets(tx, pindexNew, pblock);
}
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
return true;
}
/**
* Return the tip of the chain with the most work in it, that isn't
* known to be invalid (it's however far from certain to be valid).
*/
static CBlockIndex* FindMostWorkChain() {
do {
CBlockIndex *pindexNew = NULL;
// Find the best candidate header.
{
std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
if (it == setBlockIndexCandidates.rend())
return NULL;
pindexNew = *it;
}
// Check whether all blocks on the path between the currently active chain and the candidate are valid.
// Just going until the active chain is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
// for the most work chain if we come across them; we can't switch
// to a chain unless we have all the non-active-chain parent blocks.
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
if (fFailedChain || fMissingData) {
// Candidate chain is not usable (either invalid or missing data)
if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindexNew;
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
while (pindexTest != pindexFailed) {
if (fFailedChain) {
pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
} else if (fMissingData) {
// If we're missing data, then add back to mapBlocksUnlinked,
// so that if the block arrives in the future we can try adding
// to setBlockIndexCandidates again.
mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
}
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
setBlockIndexCandidates.erase(pindexTest);
fInvalidAncestor = true;
break;
}
pindexTest = pindexTest->pprev;
}
if (!fInvalidAncestor)
return pindexNew;
} while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
assert(!setBlockIndexCandidates.empty());
}
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
*/
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock)
{
AssertLockHeld(cs_main);
bool fInvalidFound = false;
const CBlockIndex *pindexOldTip = chainActive.Tip();
const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
if (!DisconnectTip(state, chainparams.GetConsensus()))
return false;
fBlocksDisconnected = true;
}
// Build list of new blocks to connect.
std::vector<CBlockIndex*> vpindexToConnect;
bool fContinue = true;
int nHeight = pindexFork ? pindexFork->nHeight : -1;
while (fContinue && nHeight != pindexMostWork->nHeight) {
// Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
// a few blocks along the way.
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
vpindexToConnect.clear();
vpindexToConnect.reserve(nTargetHeight - nHeight);
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
while (pindexIter && pindexIter->nHeight != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
nHeight = nTargetHeight;
// Connect new blocks.
BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (!state.CorruptionPossible())
InvalidChainFound(vpindexToConnect.back());
state = CValidationState();
fInvalidFound = true;
fContinue = false;
break;
} else {
// A system error occurred (disk space, database error, ...).
return false;
}
} else {
PruneBlockIndexCandidates();
if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return temporarily to release the lock.
fContinue = false;
break;
}
}
}
}
if (fBlocksDisconnected) {
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
mempool.check(pcoinsTip);
// Callbacks/notifications for a new best chain.
if (fInvalidFound)
CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
else
CheckForkWarningConditions();
return true;
}
/**
* Make the best chain active, in multiple steps. The result is either failure
* or an activated best chain. pblock is either NULL or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*/
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
CBlockIndex *pindexMostWork = NULL;
do {
boost::this_thread::interruption_point();
CBlockIndex *pindexNewTip = NULL;
const CBlockIndex *pindexFork;
bool fInitialDownload;
{
LOCK(cs_main);
CBlockIndex *pindexOldTip = chainActive.Tip();
pindexMostWork = FindMostWorkChain();
// Whether we have anything to do at all.
if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
return true;
if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
return false;
pindexNewTip = chainActive.Tip();
pindexFork = chainActive.FindFork(pindexOldTip);
fInitialDownload = IsInitialBlockDownload();
}
// When we reach this point, we switched to a new tip (stored in pindexNewTip).
// Notifications/callbacks that can run without cs_main
// Always notify the UI if a new block tip was connected
if (pindexFork != pindexNewTip) {
uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
if (!fInitialDownload) {
// Find the hashes of all blocks that weren't previously in the best chain.
std::vector<uint256> vHashes;
CBlockIndex *pindexToAnnounce = pindexNewTip;
while (pindexToAnnounce != pindexFork) {
vHashes.push_back(pindexToAnnounce->GetBlockHash());
pindexToAnnounce = pindexToAnnounce->pprev;
if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
// Limit announcements in case of a huge reorganization.
// Rely on the peer's synchronization mechanism in that case.
break;
}
}
// Relay inventory, but don't relay old inventory during initial block download.
int nBlockEstimate = 0;
if (fCheckpointsEnabled)
nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints());
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes) {
if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) {
BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
pnode->PushBlockHash(hash);
}
}
}
}
// Notify external listeners about the new tip.
if (!vHashes.empty()) {
GetMainSignals().UpdatedBlockTip(pindexNewTip);
}
}
}
} while(pindexMostWork != chainActive.Tip());
CheckBlockIndex(chainparams.GetConsensus());
// Write changes periodically to disk, after relay.
if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
return false;
}
return true;
}
bool InvalidateBlock(CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex *pindex)
{
AssertLockHeld(cs_main);
// Mark the block itself as invalid.
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
while (chainActive.Contains(pindex)) {
CBlockIndex *pindexWalk = chainActive.Tip();
pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
setDirtyBlockIndex.insert(pindexWalk);
setBlockIndexCandidates.erase(pindexWalk);
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(state, consensusParams)) {
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
return false;
}
}
LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
// The resulting new best tip may not be in setBlockIndexCandidates anymore, so
// add it again.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
setBlockIndexCandidates.insert(it->second);
}
it++;
}
InvalidChainFound(pindex);
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
return true;
}
bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
AssertLockHeld(cs_main);
int nHeight = pindex->nHeight;
// Remove the invalidity flag from this block and all its descendants.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
setBlockIndexCandidates.insert(it->second);
}
if (it->second == pindexBestInvalid) {
// Reset invalid block marker if it was pointing to one of those.
pindexBestInvalid = NULL;
}
}
it++;
}
// Remove the invalidity flag from all ancestors too.
while (pindex != NULL) {
if (pindex->nStatus & BLOCK_FAILED_MASK) {
pindex->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(pindex);
}
pindex = pindex->pprev;
}
return true;
}
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end())
return it->second;
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(block);
assert(pindexNew);
// We assign the sequence id to blocks only when the full data is available,
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
pindexNew->nSequenceId = 0;
BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
if (miPrev != mapBlockIndex.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
pindexNew->BuildSkip();
}
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
pindexBestHeader = pindexNew;
setDirtyBlockIndex.insert(pindexNew);
return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus |= BLOCK_HAVE_DATA;
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
deque<CBlockIndex*> queue;
queue.push_back(pindexNew);
// Recursively process any descendant blocks that now may be eligible to be connected.
while (!queue.empty()) {
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
{
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
while (range.first != range.second) {
std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
queue.push_back(it->second);
range.first++;
mapBlocksUnlinked.erase(it);
}
}
} else {
if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
}
}
return true;
}
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
LOCK(cs_LastBlockFile);
unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
if (!fKnown) {
while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
nFile++;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
}
pos.nFile = nFile;
pos.nPos = vinfoBlockFile[nFile].nSize;
}
if ((int)nFile != nLastBlockFile) {
if (!fKnown) {
LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
}
FlushBlockFile(!fKnown);
nLastBlockFile = nFile;
}
vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (fKnown)
vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
else
vinfoBlockFile[nFile].nSize += nAddSize;
if (!fKnown) {
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
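// Illustration (assuming 16 MiB block-file chunks): growing a file from 30 MiB to 33 MiB
// moves it from 2 pre-allocated chunks to 3, so space up to 48 MiB is pre-allocated and
// 18 MiB of additional free disk space is checked for below.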
if (nNewChunks > nOldChunks) {
if (fPruneMode)
fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
if (file) {
LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error("out of disk space");
}
}
setDirtyFileInfo.insert(nFile);
return true;
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
pos.nPos = vinfoBlockFile[nFile].nUndoSize;
nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
setDirtyFileInfo.insert(nFile);
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (fPruneMode)
fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenUndoFile(pos);
if (file) {
LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error("out of disk space");
}
return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW)
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))
return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
// Check timestamp
if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
if (block.fChecked)
return true;
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(block, state, fCheckPOW))
return false;
// Check the merkle root.
if (fCheckMerkleRoot) {
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2)
return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
// of transactions in a block without affecting the merkle root of a block,
// while still invalidating it.
if (mutated)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
}
// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.
// Size limits
if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i].IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
// Check transactions
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!CheckTransaction(tx, state))
return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
strprintf("Transaction check failed (tx hash %s) %s", tx.GetHash().ToString(), state.GetDebugMessage()));
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, block.vtx)
{
nSigOps += GetLegacySigOpCount(tx);
}
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
if (fCheckPOW && fCheckMerkleRoot)
block.fChecked = true;
return true;
}
static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
return true;
int nHeight = pindexPrev->nHeight+1;
// Don't accept any forks from the main chain prior to last checkpoint
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
return true;
}
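/** Header checks that require the previous block index (descriptive note): expected
 *  proof-of-work target, timestamp above the median time past, and rejection of
 *  outdated block versions once a supermajority of recent blocks has upgraded. */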
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
for (int32_t version = 2; version < 5; ++version) // check for version 2, 3 and 4 upgrades
if (block.nVersion < version && IsSuperMajority(version, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(v%d)", version - 1),
strprintf("rejected nVersion=%d block", version - 1));
return true;
}
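/** Block checks that require the previous block index (descriptive note): every
 *  transaction must be final at this height, and once version 2 blocks have reached
 *  supermajority the coinbase must begin with the serialized block height. */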
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
const Consensus::Params& consensusParams = Params().GetConsensus();
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
int nLockTimeFlags = 0;
int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
? pindexPrev->GetMedianTimePast()
: block.GetBlockTime();
if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
}
}
// Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
// if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
if (block.nVersion >= 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityEnforceBlockUpgrade, consensusParams))
{
CScript expect = CScript() << nHeight;
if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
}
}
return true;
}
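/** Validate a block header and add it to mapBlockIndex (descriptive note): checks
 *  for duplicates, runs the context-free and contextual header checks, enforces
 *  checkpoints, and rejects headers building on invalid or unknown parents. */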
static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
AssertLockHeld(cs_main);
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = NULL;
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
if (miSelf != mapBlockIndex.end()) {
// Block header is already known.
pindex = miSelf->second;
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK)
return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
return true;
}
if (!CheckBlockHeader(block, state))
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
// Get prev block index
CBlockIndex* pindexPrev = NULL;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
assert(pindexPrev);
if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
if (!ContextualCheckBlockHeader(block, state, pindexPrev))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
}
if (pindex == NULL)
pindex = AddToBlockIndex(block);
if (ppindex)
*ppindex = pindex;
return true;
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
{
AssertLockHeld(cs_main);
CBlockIndex *&pindex = *ppindex;
if (!AcceptBlockHeader(block, state, chainparams, &pindex))
return false;
// Try to process all requested blocks that we don't have, but only
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
if (fAlreadyHave) return true;
if (!fRequested) { // If we didn't ask for it:
if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
if (!fHasMoreWork) return true; // Don't process less-work chains
if (fTooFarAhead) return true; // Block height is too high
}
if ((!CheckBlock(block, state)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
return error("%s: %s", __func__, FormatStateMessage(state));
}
int nHeight = pindex->nHeight;
// Write block to history file
try {
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
if (dbp != NULL)
blockPos = *dbp;
if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
return error("AcceptBlock(): FindBlockPos failed");
if (dbp == NULL)
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
AbortNode(state, "Failed to write block");
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("AcceptBlock(): ReceivedBlockTransactions failed");
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error: ") + e.what());
}
if (fCheckForPruning)
FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files
return true;
}
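/** Return true if at least nRequired of the nMajorityWindow blocks ending at pstart
 *  have nVersion >= minVersion (descriptive note). */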
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
{
unsigned int nFound = 0;
for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
{
if (pstart->nVersion >= minVersion)
++nFound;
pstart = pstart->pprev;
}
return (nFound >= nRequired);
}
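/** Process a newly received block; pfrom may be NULL for locally submitted blocks
 *  (descriptive note). Stores the block via AcceptBlock under cs_main, records which
 *  peer sent it, then attempts to activate the best chain. */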
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, CDiskBlockPos* dbp)
{
{
LOCK(cs_main);
bool fRequested = MarkBlockAsReceived(pblock->GetHash());
fRequested |= fForceProcessing;
// Store to disk
CBlockIndex *pindex = NULL;
bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret)
return error("%s: AcceptBlock FAILED", __func__);
}
if (!ActivateBestChain(state, chainparams, pblock))
return error("%s: ActivateBestChain failed", __func__);
return true;
}
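/** Check whether a block built on the current tip would be valid, without storing it
 *  or changing chain state (descriptive note): runs the header, block and contextual
 *  checks plus a ConnectBlock dry run against a temporary coins view. */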
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
AssertLockHeld(cs_main);
assert(pindexPrev && pindexPrev == chainActive.Tip());
if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
indexDummy.nHeight = pindexPrev->nHeight + 1;
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, pindexPrev))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
if (!CheckBlock(block, state, fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
if (!ContextualCheckBlock(block, state, pindexPrev))
return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
if (!ConnectBlock(block, state, &indexDummy, viewNew, true))
return false;
assert(state.IsValid());
return true;
}
/**
* BLOCK PRUNING CODE
*/
/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
uint64_t retval = 0;
BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
retval += file.nSize + file.nUndoSize;
}
return retval;
}
/* Prune a block file (modify associated database entries) */
void PruneOneBlockFile(const int fileNumber)
{
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
CBlockIndex* pindex = it->second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
pindex->nStatus &= ~BLOCK_HAVE_UNDO;
pindex->nFile = 0;
pindex->nDataPos = 0;
pindex->nUndoPos = 0;
setDirtyBlockIndex.insert(pindex);
// Prune from mapBlocksUnlinked -- any block we prune would have
// to be downloaded again in order to consider its chain, at which
// point it would be considered as a candidate for
// mapBlocksUnlinked or setBlockIndexCandidates.
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
while (range.first != range.second) {
std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
range.first++;
if (it->second == pindex) {
mapBlocksUnlinked.erase(it);
}
}
}
}
vinfoBlockFile[fileNumber].SetNull();
setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
CDiskBlockPos pos(*it, 0);
boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
/* Calculate the block/rev files that should be deleted to remain under target */
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
if (chainActive.Tip() == NULL || nPruneTarget == 0) {
return;
}
if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
return;
}
unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check for pruning until after we've allocated new space for files,
// so we should leave a buffer under our target to account for another allocation
// before the next pruning.
uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
uint64_t nBytesToPrune;
int count=0;
if (nCurrentUsage + nBuffer >= nPruneTarget) {
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
if (vinfoBlockFile[fileNumber].nSize == 0)
continue;
if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
break;
// Don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the
// main chain's tip, but keep scanning.
if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
continue;
PruneOneBlockFile(fileNumber);
// Queue up the files for removal
setFilesToPrune.insert(fileNumber);
nCurrentUsage -= nBytesToPrune;
count++;
}
}
LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
nLastBlockWeCanPrune, count);
}
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;
// Check for nMinDiskSpace bytes (currently 50MB)
if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
return true;
}
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
if (pos.IsNull())
return NULL;
boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
boost::filesystem::create_directories(path.parent_path());
FILE* file = fopen(path.string().c_str(), "rb+");
if (!file && !fReadOnly)
file = fopen(path.string().c_str(), "wb+");
if (!file) {
LogPrintf("Unable to open file %s\n", path.string());
return NULL;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
fclose(file);
return NULL;
}
}
return file;
}
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "blk", fReadOnly);
}
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "rev", fReadOnly);
}
boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex * InsertBlockIndex(uint256 hash)
{
if (hash.IsNull())
return NULL;
// Return existing
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
return (*mi).second;
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
if (!pindexNew)
throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
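/** Load the block index from disk and rebuild derived in-memory state (descriptive
 *  note): per-block chain work and chain-tx linkage, block file info, the
 *  pruning/reindexing/txindex flags, and the active chain tip. */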
bool static LoadBlockIndexDB()
{
const CChainParams& chainparams = Params();
if (!pblocktree->LoadBlockIndexGuts())
return false;
boost::this_thread::interruption_point();
// Calculate nChainWork
vector<pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
}
sort(vSortedByHeight.begin(), vSortedByHeight.end());
BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
{
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
// We can link the chain of blocks for which we've received transactions at some point.
// Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
if (pindex->pprev) {
if (pindex->pprev->nChainTx) {
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
} else {
pindex->nChainTx = 0;
mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
}
} else {
pindex->nChainTx = pindex->nTx;
}
}
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
if (pindex->pprev)
pindex->BuildSkip();
if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
pindexBestHeader = pindex;
}
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
vinfoBlockFile.resize(nLastBlockFile + 1);
LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
}
LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
for (int nFile = nLastBlockFile + 1; true; nFile++) {
CBlockFileInfo info;
if (pblocktree->ReadBlockFileInfo(nFile, info)) {
vinfoBlockFile.push_back(info);
} else {
break;
}
}
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
set<int> setBlkDataFiles;
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
setBlkDataFiles.insert(pindex->nFile);
}
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
CDiskBlockPos pos(*it, 0);
if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
}
// Check whether we have ever pruned block & undo files
pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
if (fHavePruned)
LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
fReindex |= fReindexing;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
return true;
chainActive.SetTip(it->second);
PruneBlockIndexCandidates();
LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));
return true;
}
CVerifyDB::CVerifyDB()
{
uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}
CVerifyDB::~CVerifyDB()
{
uiInterface.ShowProgress("", 100);
}
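/** Verify the last nCheckDepth blocks of the active chain at the requested check
 *  level (descriptive note): level 0 reads blocks from disk, 1 checks block validity,
 *  2 verifies undo data, 3 disconnects tip blocks in memory, and 4 reconnects them. */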
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
return true;
// Verify blocks in the best chain
if (nCheckDepth <= 0)
nCheckDepth = 1000000000; // suffices until the year 19000
if (nCheckDepth > chainActive.Height())
nCheckDepth = chainActive.Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(coinsview);
CBlockIndex* pindexState = chainActive.Tip();
CBlockIndex* pindexFailure = NULL;
int nGoodTransactions = 0;
CValidationState state;
for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
if (pindex->nHeight < chainActive.Height()-nCheckDepth)
break;
CBlock block;
// check level 0: read from disk
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (!pos.IsNull()) {
if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
bool fClean = true;
if (!DisconnectBlock(block, state, pindex, coins, &fClean))
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
pindexState = pindex->pprev;
if (!fClean) {
nGoodTransactions = 0;
pindexFailure = pindex;
} else
nGoodTransactions += block.vtx.size();
}
if (ShutdownRequested())
return true;
}
if (pindexFailure)
return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!ConnectBlock(block, state, pindex, coins))
return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);
return true;
}
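/** Clear all in-memory chain state (descriptive note): the block index and active
 *  chain, mempool, orphan maps, per-peer bookkeeping, dirty sets and the
 *  version-bits and warning caches. */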
void UnloadBlockIndex()
{
LOCK(cs_main);
setBlockIndexCandidates.clear();
chainActive.SetTip(NULL);
pindexBestInvalid = NULL;
pindexBestHeader = NULL;
mempool.clear();
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
nSyncStarted = 0;
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
nBlockSequenceId = 1;
mapBlockSource.clear();
mapBlocksInFlight.clear();
nQueuedValidatedHeaders = 0;
nPreferredDownload = 0;
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
mapNodeState.clear();
recentRejects.reset(NULL);
versionbitscache.Clear();
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
warningcache[b].clear();
}
BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
delete entry.second;
}
mapBlockIndex.clear();
fHavePruned = false;
}
bool LoadBlockIndex()
{
// Load block index from databases
if (!fReindex && !LoadBlockIndexDB())
return false;
return true;
}
bool InitBlockIndex(const CChainParams& chainparams)
{
LOCK(cs_main);
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
// Check whether we're already initialized
if (chainActive.Genesis() != NULL)
return true;
// Use the provided setting for -txindex in the new database
fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
pblocktree->WriteFlag("txindex", fTxIndex);
LogPrintf("Initializing databases...\n");
// Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
if (!fReindex) {
try {
CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
// Start new block file
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
CValidationState state;
if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
return error("LoadBlockIndex(): FindBlockPos failed");
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
return error("LoadBlockIndex(): writing genesis block to disk failed");
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted");
if (!ActivateBestChain(state, chainparams, &block))
return error("LoadBlockIndex(): genesis block cannot be activated");
// Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
}
}
return true;
}
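/** Import blocks from an external block file, e.g. during a reindex (descriptive
 *  note). Blocks whose parent is not yet known are remembered by parent hash and
 *  processed once the parent has been seen. */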
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
// Map of disk positions for blocks with unknown parent (only used for reindex)
static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
boost::this_thread::interruption_point();
blkdat.SetPos(nRewind);
nRewind++; // start one byte further next time, in case of failure
blkdat.SetLimit(); // remove former limit
unsigned int nSize = 0;
try {
// locate a header
unsigned char buf[MESSAGE_START_SIZE];
blkdat.FindByte(chainparams.MessageStart()[0]);
nRewind = blkdat.GetPos()+1;
blkdat >> FLATDATA(buf);
if (memcmp(buf, chainparams.MessageStart(), MESSAGE_START_SIZE))
continue;
// read size
blkdat >> nSize;
if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
continue;
} catch (const std::exception&) {
// no valid block header found; don't complain
break;
}
try {
// read block
uint64_t nBlockPos = blkdat.GetPos();
if (dbp)
dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize);
blkdat.SetPos(nBlockPos);
CBlock block;
blkdat >> block;
nRewind = blkdat.GetPos();
// detect out of order blocks, and store them for later
uint256 hash = block.GetHash();
if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
block.hashPrevBlock.ToString());
if (dbp)
mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
continue;
}
// process in case the block isn't known yet
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
if (ProcessNewBlock(state, chainparams, NULL, &block, true, dbp))
nLoaded++;
if (state.IsError())
break;
} else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
}
// Recursively process earlier encountered successors of this block
deque<uint256> queue;
queue.push_back(hash);
while (!queue.empty()) {
uint256 head = queue.front();
queue.pop_front();
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
{
LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
head.ToString());
CValidationState dummy;
if (ProcessNewBlock(dummy, chainparams, NULL, &block, true, &it->second))
{
nLoaded++;
queue.push_back(block.GetHash());
}
}
range.first++;
mapBlocksUnknownParent.erase(it);
}
}
} catch (const std::exception& e) {
LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
}
}
} catch (const std::runtime_error& e) {
AbortNode(std::string("System error: ") + e.what());
}
if (nLoaded > 0)
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
return nLoaded > 0;
}
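/** Exhaustive consistency check of mapBlockIndex, setBlockIndexCandidates and
 *  mapBlocksUnlinked (descriptive note). Only runs when fCheckBlockIndex is set;
 *  walks the entire block tree depth-first and asserts its internal invariants. */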
void static CheckBlockIndex(const Consensus::Params& consensusParams)
{
if (!fCheckBlockIndex) {
return;
}
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
// so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
// iterating the block tree require that chainActive has been initialized.)
if (chainActive.Height() < 0) {
assert(mapBlockIndex.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex*,CBlockIndex*> forward;
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
forward.insert(std::make_pair(it->second->pprev, it->second));
}
assert(forward.size() == mapBlockIndex.size());
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from the genesis
// block to the block being explored which are the first to have certain properties.
size_t nNodes = 0;
int nHeight = 0;
CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
while (pindex != NULL) {
nNodes++;
if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
// Begin: actual consistency checks.
if (pindex->pprev == NULL) {
// Genesis block checks.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
}
if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
// HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
if (!fHavePruned) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else {
// If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
}
if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
// All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
if (pindexFirstInvalid == NULL) {
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
if (pindexFirstInvalid == NULL) {
// If this block sorts at least as good as the current tip and
// is valid and we have all data for its parents, it must be in
// setBlockIndexCandidates. chainActive.Tip() must also be there
// even if some data has been pruned.
if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block was in
// setBlockIndexCandidates but had to be removed because of the missing data.
// In this case it must be in mapBlocksUnlinked -- see test below.
}
} else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in mapBlocksUnlinked.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
if (rangeUnlinked.first->second == pindex) {
foundInUnlinked = true;
break;
}
rangeUnlinked.first++;
}
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
// If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
assert(foundInUnlinked);
}
if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
// We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
assert(fHavePruned); // We must have pruned.
// This block may have entered mapBlocksUnlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between chainActive and the
// tip.
// So if this block is itself better than chainActive.Tip() and it wasn't in
// setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == NULL) {
assert(foundInUnlinked);
}
}
}
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
// End: actual consistency checks.
// Try descending into the first subnode.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
pindex = range.first->second;
nHeight++;
continue;
}
// This is a leaf node.
// Move upwards until we reach a node of which we have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the corresponding variable.
if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
// Find our parent.
CBlockIndex* pindexPar = pindex->pprev;
// Find which child we just visited.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
while (rangePar.first->second != pindex) {
assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
rangePar.first++;
}
// Proceed to the next one.
rangePar.first++;
if (rangePar.first != rangePar.second) {
// Move to the sibling.
pindex = rangePar.first->second;
break;
} else {
// Move up further.
pindex = pindexPar;
nHeight--;
continue;
}
}
}
// Check that we actually traversed the entire map.
assert(nNodes == forward.size());
}
-//////////////////////////////////////////////////////////////////////////////
-//
-// CAlert
-//
-
std::string GetWarnings(const std::string& strFor)
{
- int nPriority = 0;
string strStatusBar;
string strRPC;
string strGUI;
if (!CLIENT_VERSION_IS_RELEASE) {
strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
strGUI = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
}
if (GetBoolArg("-testsafemode", DEFAULT_TESTSAFEMODE))
strStatusBar = strRPC = strGUI = "testsafemode enabled";
// Misc warnings, like running out of disk space or the clock being wrong
if (strMiscWarning != "")
{
- nPriority = 1000;
strStatusBar = strGUI = strMiscWarning;
}
if (fLargeWorkForkFound)
{
- nPriority = 2000;
strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
strGUI = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
}
else if (fLargeWorkInvalidChainFound)
{
- nPriority = 2000;
strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
strGUI = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
}
- // Alerts
- {
- LOCK(cs_mapAlerts);
- BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
- {
- const CAlert& alert = item.second;
- if (alert.AppliesToMe() && alert.nPriority > nPriority)
- {
- nPriority = alert.nPriority;
- strStatusBar = strGUI = alert.strStatusBar;
- }
- }
- }
-
if (strFor == "gui")
return strGUI;
else if (strFor == "statusbar")
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
assert(!"GetWarnings(): invalid parameter");
return "error";
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
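/** Return true if we already have (or have recently rejected) the object referenced
 *  by this inventory item, so it need not be requested again (descriptive note). */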
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
case MSG_TX:
{
assert(recentRejects);
if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
{
// If the chain tip has changed, previously rejected transactions
// might now be valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
recentRejects->reset();
}
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
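/** Service queued getdata requests from a peer (descriptive note): send full or
 *  filtered blocks from disk, relay transactions from the relay map or mempool, and
 *  answer with a notfound message for anything we cannot supply. */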
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
vector<CInv> vNotFound;
LOCK(cs_main);
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->nSendSize >= SendBufferSize())
break;
const CInv &inv = *it;
{
boost::this_thread::interruption_point();
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (chainActive.Contains(mi->second)) {
send = true;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
// Send block from disk
CBlock block;
if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
pfrom->PushMessage(NetMsgType::BLOCK, block);
else // MSG_FILTERED_BLOCK)
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
{
CMerkleBlock merkleBlock(block, *pfrom->pfilter);
pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec allows us to provide duplicate txn here;
// however, we MUST always provide at least what the remote peer needs.
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
pfrom->PushMessage(NetMsgType::TX, block.vtx[pair.first]);
}
// else
// no response
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
pfrom->PushMessage(NetMsgType::INV, vInv);
pfrom->hashContinue.SetNull();
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
bool pushed = false;
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end()) {
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
pushed = true;
}
}
if (!pushed && inv.type == MSG_TX) {
CTransaction tx;
if (mempool.lookup(inv.hash, tx)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << tx;
pfrom->PushMessage(NetMsgType::TX, ss);
pushed = true;
}
}
if (!pushed) {
vNotFound.push_back(inv);
}
}
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
break;
}
}
pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
}
}
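/** Handle a single received network message, dispatching on the command string
 *  (version, verack, addr, sendheaders, inv, getdata, getblocks, getheaders, ...)
 *  (descriptive note). */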
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
{
const CChainParams& chainparams = Params();
RandAddSeedPerfmon();
LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (!(nLocalServices & NODE_BLOOM) &&
(strCommand == NetMsgType::FILTERLOAD ||
strCommand == NetMsgType::FILTERADD ||
strCommand == NetMsgType::FILTERCLEAR))
{
if (pfrom->nVersion >= NO_BLOOM_VERSION) {
Misbehaving(pfrom->GetId(), 100);
return false;
} else {
pfrom->fDisconnect = true;
return false;
}
}
if (strCommand == NetMsgType::VERSION)
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
Misbehaving(pfrom->GetId(), 1);
return false;
}
int64_t nTime;
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
pfrom->fDisconnect = true;
return false;
}
if (pfrom->nVersion == 10300)
pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH);
pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
}
if (!vRecv.empty())
vRecv >> pfrom->nStartingHeight;
if (!vRecv.empty())
vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
else
pfrom->fRelayTxes = true;
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
pfrom->fDisconnect = true;
return true;
}
pfrom->addrLocal = addrMe;
if (pfrom->fInbound && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
pfrom->PushVersion();
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
// Potentially mark this peer as a preferred download peer.
UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
// Change version
pfrom->PushMessage(NetMsgType::VERACK);
pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
// Advertise our address
if (fListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr);
if (addr.IsRoutable())
{
LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(pfrom->addrLocal);
LogPrintf("ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr);
}
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
{
pfrom->PushMessage(NetMsgType::GETADDR);
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}
- // Relay alerts
- {
- LOCK(cs_mapAlerts);
- BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
- item.second.RelayTo(pfrom);
- }
-
pfrom->fSuccessfullyConnected = true;
string remoteAddr;
if (fLogIPs)
remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
pfrom->cleanSubVer, pfrom->nVersion,
pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
remoteAddr);
int64_t nTimeOffset = nTime - GetTime();
pfrom->nTimeOffset = nTimeOffset;
AddTimeData(pfrom->addr, nTimeOffset);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
Misbehaving(pfrom->GetId(), 1);
return false;
}
else if (strCommand == NetMsgType::VERACK)
{
pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
// Mark this node as currently connected, so we update its timestamp later.
if (pfrom->fNetworkNode) {
LOCK(cs_main);
State(pfrom->GetId())->fCurrentlyConnected = true;
}
if (pfrom->nVersion >= SENDHEADERS_VERSION) {
// Tell our peer we prefer to receive headers rather than inv's
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
pfrom->PushMessage(NetMsgType::SENDHEADERS);
}
}
else if (strCommand == NetMsgType::ADDR)
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
return true;
if (vAddr.size() > 1000)
{
Misbehaving(pfrom->GetId(), 20);
return error("message addr size() = %u", vAddr.size());
}
// Store the new addresses
vector<CAddress> vAddrOk;
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
boost::this_thread::interruption_point();
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint64_t hashAddr = addr.GetHash();
uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
{
if (pnode->nVersion < CADDR_TIME_VERSION)
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
((*mi).second)->PushAddress(addr);
}
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == NetMsgType::SENDHEADERS)
{
LOCK(cs_main);
State(pfrom->GetId())->fPreferHeaders = true;
}
else if (strCommand == NetMsgType::INV)
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom->GetId(), 20);
return error("message inv size() = %u", vInv.size());
}
bool fBlocksOnly = GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
// Allow whitelisted peers to send data other than blocks in blocks-only mode if whitelistrelay is true
if (pfrom->fWhitelisted && GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
fBlocksOnly = false;
LOCK(cs_main);
std::vector<CInv> vToFetch;
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
boost::this_thread::interruption_point();
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
// First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());
if (CanDirectFetch(chainparams.GetConsensus()) &&
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
}
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
}
}
else
{
if (fBlocksOnly)
LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload())
pfrom->AskFor(inv);
}
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
if (pfrom->nSendSize > (SendBufferSize() * 2)) {
Misbehaving(pfrom->GetId(), 50);
return error("send buffer size() = %u", pfrom->nSendSize);
}
}
if (!vToFetch.empty())
pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
}
else if (strCommand == NetMsgType::GETDATA)
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom->GetId(), 20);
return error("message getdata size() = %u", vInv.size());
}
if (fDebug || (vInv.size() != 1))
LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom, chainparams.GetConsensus());
}
else if (strCommand == NetMsgType::GETBLOCKS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
// Find the last block the caller has in the main chain
CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
// If pruning, don't inv blocks unless we have them on disk and are likely to still have
// them for some reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
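// (For example, with MIN_BLOCKS_TO_KEEP = 288 and a 10-minute (600 s) target
// spacing, as on mainnet, this works out to 288 - 3600 / 600 = 282 blocks.)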
if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
{
LogPrint("net", " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == NetMsgType::GETHEADERS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
return true;
}
CNodeState *nodestate = State(pfrom->GetId());
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
BlockMap::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex(chainActive, locator);
if (pindex)
pindex = chainActive.Next(pindex);
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
// pindex can be NULL either if we sent chainActive.Tip() OR
// if our peer has chainActive.Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
}
else if (strCommand == NetMsgType::TX)
{
// Stop processing the transaction early if we are in blocks-only mode and
// the peer is either not whitelisted or whitelistrelay is off
if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY) && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
{
LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
return true;
}
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CTransaction tx;
vRecv >> tx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
LOCK(cs_main);
bool fMissingInputs = false;
CValidationState state;
pfrom->setAskFor.erase(inv.hash);
mapAlreadyAskedFor.erase(inv);
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
{
mempool.check(pcoinsTip);
RelayTransaction(tx);
vWorkQueue.push_back(inv.hash);
LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
tx.GetHash().ToString(),
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
// Recursively process any orphan transactions that depended on this one
set<NodeId> setMisbehaving;
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
if (itByPrev == mapOrphanTransactionsByPrev.end())
continue;
for (set<uint256>::iterator mi = itByPrev->second.begin();
mi != itByPrev->second.end();
++mi)
{
const uint256& orphanHash = *mi;
const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
bool fMissingInputs2 = false;
// Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
// anyone relaying LegitTxX banned)
CValidationState stateDummy;
if (setMisbehaving.count(fromPeer))
continue;
if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2))
{
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx);
vWorkQueue.push_back(orphanHash);
vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
int nDos = 0;
if (stateDummy.IsInvalid(nDos) && nDos > 0)
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
setMisbehaving.insert(fromPeer);
LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
assert(recentRejects);
recentRejects->insert(orphanHash);
}
mempool.check(pcoinsTip);
}
}
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
else if (fMissingInputs)
{
AddOrphanTx(tx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0)
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
} else {
assert(recentRejects);
recentRejects->insert(tx.GetHash());
if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that we would assign a non-zero DoS
// score for, as we expect peers to do the same with us in that
// case.
int nDoS = 0;
if (!state.IsInvalid(nDoS) || nDoS == 0) {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
RelayTransaction(tx);
} else {
LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
}
}
}
int nDoS = 0;
if (state.IsInvalid(nDoS))
{
LogPrint("mempoolrej", "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->id,
FormatStateMessage(state));
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0)
Misbehaving(pfrom->GetId(), nDoS);
}
FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
}
else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
Misbehaving(pfrom->GetId(), 20);
return error("headers message size = %u", nCount);
}
headers.resize(nCount);
for (unsigned int n = 0; n < nCount; n++) {
vRecv >> headers[n];
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
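// Each entry on the wire is a block header followed by a compact-size tx count;
// headers are serialized like empty CBlocks (see the getheaders handler above),
// so the count read and discarded here is expected to be zero.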
LOCK(cs_main);
if (nCount == 0) {
// Nothing interesting. Stop asking this peer for more headers.
return true;
}
CBlockIndex *pindexLast = NULL;
BOOST_FOREACH(const CBlockHeader& header, headers) {
CValidationState state;
if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
Misbehaving(pfrom->GetId(), 20);
return error("non-continuous headers sequence");
}
if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
if (nDoS > 0)
Misbehaving(pfrom->GetId(), nDoS);
return error("invalid header received");
}
}
}
if (pindexLast)
UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
}
bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
CNodeState *nodestate = State(pfrom->GetId());
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
vector<CBlockIndex *> vToFetch;
CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
!mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
// We don't have this block, and it's not yet in flight.
vToFetch.push_back(pindexWalk);
}
pindexWalk = pindexWalk->pprev;
}
// If pindexWalk still isn't on our main chain, we're looking at a
// very large reorg at a time we think we're close to caught up to
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if (!chainActive.Contains(pindexWalk)) {
LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
} else {
vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
}
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
LogPrint("net", "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->id);
}
if (vGetData.size() > 1) {
LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
}
}
}
CheckBlockIndex(chainparams.GetConsensus());
}
else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
{
CBlock block;
vRecv >> block;
CInv inv(MSG_BLOCK, block.GetHash());
LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
pfrom->AddInventoryKnown(inv);
CValidationState state;
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);
int nDoS;
if (state.IsInvalid(nDoS)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), nDoS);
}
}
}
else if (strCommand == NetMsgType::GETADDR)
{
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if (!pfrom->fInbound) {
LogPrint("net", "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id);
return true;
}
pfrom->vAddrToSend.clear();
vector<CAddress> vAddr = addrman.GetAddr();
BOOST_FOREACH(const CAddress &addr, vAddr)
pfrom->PushAddress(addr);
}
else if (strCommand == NetMsgType::MEMPOOL)
{
if (CNode::OutboundTargetReached(false) && !pfrom->fWhitelisted)
{
LogPrint("net", "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
LOCK2(cs_main, pfrom->cs_filter);
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
vector<CInv> vInv;
BOOST_FOREACH(uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
if (pfrom->pfilter) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
}
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pfrom->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
}
if (vInv.size() > 0)
pfrom->PushMessage(NetMsgType::INV, vInv);
}
else if (strCommand == NetMsgType::PING)
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64_t nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote node from getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
pfrom->PushMessage(NetMsgType::PONG, nonce);
}
}
else if (strCommand == NetMsgType::PONG)
{
int64_t pingUsecEnd = nTimeReceived;
uint64_t nonce = 0;
size_t nAvail = vRecv.in_avail();
bool bPingFinished = false;
std::string sProblem;
if (nAvail >= sizeof(nonce)) {
vRecv >> nonce;
// Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
if (pfrom->nPingNonceSent != 0) {
if (nonce == pfrom->nPingNonceSent) {
// Matching pong received, this ping is no longer outstanding
bPingFinished = true;
int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
if (pingUsecTime > 0) {
// Successful ping time measurement, replace previous
pfrom->nPingUsecTime = pingUsecTime;
pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
} else {
// This should never happen
sProblem = "Timing mishap";
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
}
} else {
sProblem = "Unsolicited pong without ping";
}
} else {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
if (!(sProblem.empty())) {
LogPrint("net", "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
pfrom->id,
sProblem,
pfrom->nPingNonceSent,
nonce,
nAvail);
}
if (bPingFinished) {
pfrom->nPingNonceSent = 0;
}
}
- else if (fAlerts && strCommand == NetMsgType::ALERT)
- {
- CAlert alert;
- vRecv >> alert;
-
- uint256 alertHash = alert.GetHash();
- if (pfrom->setKnown.count(alertHash) == 0)
- {
- if (alert.ProcessAlert(chainparams.AlertKey()))
- {
- // Relay
- pfrom->setKnown.insert(alertHash);
- {
- LOCK(cs_vNodes);
- BOOST_FOREACH(CNode* pnode, vNodes)
- alert.RelayTo(pnode);
- }
- }
- else {
- // Small DoS penalty so peers that send us lots of
- // duplicate/expired/invalid-signature/whatever alerts
- // eventually get banned.
- // This isn't a Misbehaving(100) (immediate ban) because the
- // peer might be an older or different implementation with
- // a different signature key, etc.
- Misbehaving(pfrom->GetId(), 10);
- }
- }
- }
-
-
else if (strCommand == NetMsgType::FILTERLOAD)
{
CBloomFilter filter;
vRecv >> filter;
if (!filter.IsWithinSizeConstraints())
// There is no excuse for sending a too-large filter
Misbehaving(pfrom->GetId(), 100);
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
}
pfrom->fRelayTxes = true;
}
else if (strCommand == NetMsgType::FILTERADD)
{
vector<unsigned char> vData;
vRecv >> vData;
// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
Misbehaving(pfrom->GetId(), 100);
} else {
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
pfrom->pfilter->insert(vData);
else
Misbehaving(pfrom->GetId(), 100);
}
}
else if (strCommand == NetMsgType::FILTERCLEAR)
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter();
pfrom->fRelayTxes = true;
}
else if (strCommand == NetMsgType::REJECT)
{
if (fDebug) {
try {
string strMsg; unsigned char ccode; string strReason;
vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
ostringstream ss;
ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
{
uint256 hash;
vRecv >> hash;
ss << ": hash " << hash.ToString();
}
LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
} catch (const std::ios_base::failure&) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
LogPrint("net", "Unparseable reject message received\n");
}
}
}
else
{
// Ignore unknown commands for extensibility
LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
}
return true;
}
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom)
{
const CChainParams& chainparams = Params();
//if (fDebug)
// LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
bool fOk = true;
if (!pfrom->vRecvGetData.empty())
ProcessGetData(pfrom, chainparams.GetConsensus());
// this maintains the order of responses
if (!pfrom->vRecvGetData.empty()) return fOk;
std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->nSendSize >= SendBufferSize())
break;
// get next message
CNetMessage& msg = *it;
//if (fDebug)
// LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
// msg.hdr.nMessageSize, msg.vRecv.size(),
// msg.complete() ? "Y" : "N");
// end, if an incomplete message is found
if (!msg.complete())
break;
// at this point, any failure means we can delete the current message
it++;
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
fOk = false;
break;
}
// Read header
CMessageHeader& hdr = msg.hdr;
if (!hdr.IsValid(chainparams.MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
continue;
}
string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
// Checksum
CDataStream& vRecv = msg.vRecv;
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
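// The checksum is the first four bytes (read little-endian) of the message
// hash computed above and must match the value carried in the header.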
if (nChecksum != hdr.nChecksum)
{
LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);
continue;
}
// Process message
bool fRet = false;
try
{
fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime);
boost::this_thread::interruption_point();
}
catch (const std::ios_base::failure& e)
{
pfrom->PushMessage(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, string("error parsing message"));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (const boost::thread_interrupted&) {
throw;
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(NULL, "ProcessMessages()");
}
if (!fRet)
LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
break;
}
// In case the connection got shut down, its receive buffer was wiped
if (!pfrom->fDisconnect)
pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
return fOk;
}
bool SendMessages(CNode* pto)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
// Don't send anything until we get the peer's version message
if (pto->nVersion == 0)
return true;
//
// Message: ping
//
bool pingSend = false;
if (pto->fPingQueued) {
// RPC ping request by user
pingSend = true;
}
if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
uint64_t nonce = 0;
while (nonce == 0) {
GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
}
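// A nonce of zero means "no ping outstanding" (see the pong handler), so
// keep drawing random bytes until a non-zero value comes up.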
pto->fPingQueued = false;
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
pto->PushMessage(NetMsgType::PING, nonce);
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto->nPingNonceSent = 0;
pto->PushMessage(NetMsgType::PING);
}
}
TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
if (!lockMain)
return true;
// Address refresh broadcast
int64_t nNow = GetTimeMicros();
if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
AdvertiseLocal(pto);
pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
//
// Message: addr
//
if (pto->nNextAddrSend < nNow) {
pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
if (!pto->addrKnown.contains(addr.GetKey()))
{
pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// the receiver rejects addr messages larger than 1000 entries
if (vAddr.size() >= 1000)
{
pto->PushMessage(NetMsgType::ADDR, vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
pto->PushMessage(NetMsgType::ADDR, vAddr);
}
CNodeState &state = *State(pto->GetId());
if (state.fShouldBan) {
if (pto->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
else {
pto->fDisconnect = true;
if (pto->addr.IsLocal())
LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
else
{
CNode::Ban(pto->addr, BanReasonNodeMisbehaving);
}
}
state.fShouldBan = false;
}
BOOST_FOREACH(const CBlockReject& reject, state.rejects)
pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
state.rejects.clear();
// Start block sync
if (pindexBestHeader == NULL)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
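// ("Close to today" means the best known header is less than 24 hours old.)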
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
non-empty list of headers back as long as the peer
is up-to-date. With a non-empty response, we can initialise
the peer's known best block. This wouldn't be possible
if we requested starting at pindexBestHeader and
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
}
}
// Resend wallet transactions that haven't gotten in a block yet
// Except during reindex, importing and IBD, when old wallet
// transactions become unconfirmed and spam other nodes.
if (!fReindex && !fImporting && !IsInitialBlockDownload())
{
GetMainSignals().Broadcast(nTimeBestReceived);
}
//
// Try sending block announcements via headers
//
{
// If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
// list of block hashes we're relaying, and our peer wants
// headers announcements, then find the first header
// not yet known to our peer but would connect, and send.
// If no header would connect, or if we have too many
// blocks, or if the peer doesn't want headers, just
// add all to the inv queue.
LOCK(pto->cs_inventory);
vector<CBlock> vHeaders;
bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
CBlockIndex *pBestIndex = NULL; // last header queued for delivery
ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
bool fFoundStartingHeader = false;
// Try to find first header that our peer doesn't have, and
// then send all headers past that one. If we come across any
// headers that aren't on chainActive, give up.
BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
if (chainActive[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
break;
}
assert(pBestIndex == NULL || pindex->pprev == pBestIndex);
pBestIndex = pindex;
if (fFoundStartingHeader) {
// add this to the headers message
vHeaders.push_back(pindex->GetBlockHeader());
} else if (PeerHasHeader(&state, pindex)) {
continue; // keep looking for the first new block
} else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
// Peer doesn't have this header but they do have the prior one.
// Start sending headers.
fFoundStartingHeader = true;
vHeaders.push_back(pindex->GetBlockHeader());
} else {
// Peer doesn't have this header or the prior one -- nothing will
// connect, so bail out.
fRevertToInv = true;
break;
}
}
}
if (fRevertToInv) {
// If falling back to using an inv, just try to inv the tip.
// The last entry in vBlockHashesToAnnounce was our tip at some point
// in the past.
if (!pto->vBlockHashesToAnnounce.empty()) {
const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
assert(mi != mapBlockIndex.end());
CBlockIndex *pindex = mi->second;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
// Just log for now.
if (chainActive[pindex->nHeight] != pindex) {
LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
}
// If the peer announced this block to us, don't inv it back.
// (Since block announcements may not be via inv's, we can't solely rely on
// setInventoryKnown to track this.)
if (!PeerHasHeader(&state, pindex)) {
pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
pto->id, hashToAnnounce.ToString());
}
}
} else if (!vHeaders.empty()) {
if (vHeaders.size() > 1) {
LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
vHeaders.size(),
vHeaders.front().GetHash().ToString(),
vHeaders.back().GetHash().ToString(), pto->id);
} else {
LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
}
pto->PushMessage(NetMsgType::HEADERS, vHeaders);
state.pindexBestHeaderSent = pBestIndex;
}
pto->vBlockHashesToAnnounce.clear();
}
//
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
}
LOCK(pto->cs_inventory);
vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
continue;
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
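// (hashRand & 3) is roughly uniform over {0,1,2,3}, so about one in four tx
// invs skips the trickle delay, matching the 1/4 mentioned above.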
if (fTrickleWait)
{
vInvWait.push_back(inv);
continue;
}
}
pto->filterInventoryKnown.insert(inv.hash);
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
pto->PushMessage(NetMsgType::INV, vInv);
vInv.clear();
}
}
pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty())
pto->PushMessage(NetMsgType::INV, vInv);
// Detect whether we're stalling
nNow = GetTimeMicros();
if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
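// (With BLOCK_STALLING_TIMEOUT = 2, a peer is dropped once it has blocked the
// window for more than two seconds.)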
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
pto->fDisconnect = true;
}
// In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
// (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
// timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
// We also compare the block download timeout originally calculated against the time at which we'd disconnect
// if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
// only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
// permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
// more quickly than once every 5 minutes, then we'll shorten the download window for this block).
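// (For example, with N = 4 validated blocks in flight and a 10-minute block
// interval, the timeout is (2 + 0.5 * 4) * 600 s = 2400 s, i.e. 40 minutes.)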
if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
}
if (queuedBlock.nTimeDisconnect < nNow) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
pto->fDisconnect = true;
}
}
//
// Message: getdata (blocks)
//
vector<CInv> vGetData;
if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vector<CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = nNow;
LogPrint("net", "Stall started peer=%d\n", staller);
}
}
}
//
// Message: getdata (non-blocks)
//
while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
if (fDebug)
LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
pto->PushMessage(NetMsgType::GETDATA, vGetData);
vGetData.clear();
}
} else {
// If we're not going to ask, don't expect a response.
pto->setAskFor.erase(inv.hash);
}
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
pto->PushMessage(NetMsgType::GETDATA, vGetData);
}
return true;
}
std::string CBlockFileInfo::ToString() const {
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
}
class CMainCleanup
{
public:
CMainCleanup() {}
~CMainCleanup() {
// block headers
BlockMap::iterator it1 = mapBlockIndex.begin();
for (; it1 != mapBlockIndex.end(); it1++)
delete (*it1).second;
mapBlockIndex.clear();
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
}
} instance_of_cmaincleanup;
diff --git a/src/main.h b/src/main.h
index ecb1efa5cb..a011ba4e55 100644
--- a/src/main.h
+++ b/src/main.h
@@ -1,570 +1,567 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_MAIN_H
#define BITCOIN_MAIN_H
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif
#include "amount.h"
#include "chain.h"
#include "coins.h"
#include "net.h"
#include "script/script_error.h"
#include "sync.h"
#include "versionbits.h"
#include <algorithm>
#include <exception>
#include <map>
#include <set>
#include <stdint.h>
#include <string>
#include <utility>
#include <vector>
#include <boost/unordered_map.hpp>
class CBlockIndex;
class CBlockTreeDB;
class CBloomFilter;
class CChainParams;
class CInv;
class CScriptCheck;
class CTxMemPool;
class CValidationInterface;
class CValidationState;
struct CNodeStateStats;
struct LockPoints;
-/** Default for accepting alerts from the P2P network. */
-static const bool DEFAULT_ALERTS = true;
/** Default for DEFAULT_WHITELISTRELAY. */
static const bool DEFAULT_WHITELISTRELAY = true;
/** Default for DEFAULT_WHITELISTFORCERELAY. */
static const bool DEFAULT_WHITELISTFORCERELAY = true;
/** Default for -minrelaytxfee, minimum relay fee for transactions */
static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000;
//! -maxtxfee default
static const CAmount DEFAULT_TRANSACTION_MAXFEE = 0.1 * COIN;
//! Discourage users from setting fees higher than this amount (in satoshis) per kB
static const CAmount HIGH_TX_FEE_PER_KB = 0.01 * COIN;
//! -maxtxfee will warn if called with a higher fee than this amount (in satoshis)
static const CAmount HIGH_MAX_TX_FEE = 100 * HIGH_TX_FEE_PER_KB;
/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
/** Default for -limitancestorcount, max number of in-mempool ancestors */
static const unsigned int DEFAULT_ANCESTOR_LIMIT = 25;
/** Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors */
static const unsigned int DEFAULT_ANCESTOR_SIZE_LIMIT = 101;
/** Default for -limitdescendantcount, max number of in-mempool descendants */
static const unsigned int DEFAULT_DESCENDANT_LIMIT = 25;
/** Default for -limitdescendantsize, maximum kilobytes of in-mempool descendants */
static const unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT = 101;
/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 72;
/** The maximum size of a blk?????.dat file (since 0.8) */
static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** Maximum number of script-checking threads allowed */
static const int MAX_SCRIPTCHECK_THREADS = 16;
/** -par default (number of script-checking threads, 0 = auto) */
static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
/** Size of the "block download window": how far ahead of our current height do we fetch?
* Larger windows tolerate larger download speed differences between peers, but increase the potential
* degree of disordering of blocks on disk (which makes reindexing and, in the future, perhaps pruning
* harder). We'll probably want to make this a per-peer adaptive value at some point. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
/** Time to wait (in seconds) between writing blocks/block index to disk. */
static const unsigned int DATABASE_WRITE_INTERVAL = 60 * 60;
/** Time to wait (in seconds) between flushing chainstate to disk. */
static const unsigned int DATABASE_FLUSH_INTERVAL = 24 * 60 * 60;
/** Maximum length of reject messages. */
static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111;
/** Average delay between local address broadcasts in seconds. */
static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60;
/** Average delay between peer address broadcasts in seconds. */
static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
/** Average delay between trickled inventory broadcasts in seconds.
* Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */
static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5;
static const unsigned int DEFAULT_LIMITFREERELAY = 15;
static const bool DEFAULT_RELAYPRIORITY = true;
static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;
/** Default for -permitbaremultisig */
static const bool DEFAULT_PERMIT_BAREMULTISIG = true;
static const unsigned int DEFAULT_BYTES_PER_SIGOP = 20;
static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
static const bool DEFAULT_TXINDEX = false;
static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;
static const bool DEFAULT_TESTSAFEMODE = false;
/** Default for -mempoolreplacement */
static const bool DEFAULT_ENABLE_REPLACEMENT = true;
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
static const bool DEFAULT_PEERBLOOMFILTERS = true;
struct BlockHasher
{
size_t operator()(const uint256& hash) const { return hash.GetCheapHash(); }
};
extern CScript COINBASE_FLAGS;
extern CCriticalSection cs_main;
extern CTxMemPool mempool;
typedef boost::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern BlockMap mapBlockIndex;
extern uint64_t nLastBlockTx;
extern uint64_t nLastBlockSize;
extern const std::string strMessageMagic;
extern CWaitableCriticalSection csBestBlock;
extern CConditionVariable cvBlockChange;
extern bool fImporting;
extern bool fReindex;
extern int nScriptCheckThreads;
extern bool fTxIndex;
extern bool fIsBareMultisigStd;
extern bool fRequireStandard;
extern unsigned int nBytesPerSigOp;
extern bool fCheckBlockIndex;
extern bool fCheckpointsEnabled;
extern size_t nCoinCacheUsage;
/** A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) */
extern CFeeRate minRelayTxFee;
/** Absolute maximum transaction fee (in satoshis) used by wallet and mempool (rejects high fee in sendrawtransaction) */
extern CAmount maxTxFee;
-extern bool fAlerts;
/** If the tip is older than this (in seconds), the node is considered to be in initial block download. */
extern int64_t nMaxTipAge;
extern bool fEnableReplacement;
/** Best header we've seen so far (used for getheaders queries' starting points). */
extern CBlockIndex *pindexBestHeader;
/** Minimum disk space required - used in CheckDiskSpace() */
static const uint64_t nMinDiskSpace = 52428800;
/** Pruning-related variables and constants */
/** True if any block files have ever been pruned. */
extern bool fHavePruned;
/** True if we're running in -prune mode. */
extern bool fPruneMode;
/** Number of MiB of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
/** Block files containing a block-height within MIN_BLOCKS_TO_KEEP of chainActive.Tip() will not be pruned. */
static const unsigned int MIN_BLOCKS_TO_KEEP = 288;
static const signed int DEFAULT_CHECKBLOCKS = MIN_BLOCKS_TO_KEEP;
static const unsigned int DEFAULT_CHECKLEVEL = 3;
// Require that the user allocate at least 550 MB for block & undo files (blk???.dat and rev???.dat)
// At 1MB per block, 288 blocks = 288MB.
// Add 15% for Undo data = 331MB
// Add 20% for Orphan block rate = 397MB
// We want the low water mark after pruning to be at least 397 MB, and since we prune in
// full block file chunks, the high water mark that triggers the prune must be
// one 128 MB block file + 15% undo data = 147 MB higher, for a total of 545 MB.
// Setting the target above 550 MB makes it likely we can respect the target.
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
/** Register with a network node to receive its signals */
void RegisterNodeSignals(CNodeSignals& nodeSignals);
/** Unregister a network node */
void UnregisterNodeSignals(CNodeSignals& nodeSignals);
/**
* Process an incoming block. This only returns after the best known valid
* block is made active. Note that it does not, however, guarantee that the
* specific block passed to it has been checked for validity!
*
* @param[out] state This may be set to an Error state if any error occurred processing it, including during validation/connection/etc of otherwise unrelated blocks during reorganisation; or it may be set to an Invalid state if pblock is itself invalid (but this is not guaranteed even when the block is checked). If you want to *possibly* get feedback on whether pblock is valid, you must also install a CValidationInterface (see validationinterface.h) - this will have its BlockChecked method called whenever *any* block completes validation.
* @param[in] pfrom The node which we are receiving the block from; it is added to mapBlockSource and may be penalised if the block is invalid.
* @param[in] pblock The block we want to process.
* @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
* @param[out] dbp If pblock is stored to disk (or already there), this will be set to its location.
* @return True if state.IsValid()
*/
bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, CDiskBlockPos* dbp);
/** Check whether enough disk space is available for an incoming block */
bool CheckDiskSpace(uint64_t nAdditionalBytes = 0);
/** Open a block file (blk?????.dat) */
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly = false);
/** Open an undo file (rev?????.dat) */
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
/** Translation to a filesystem path */
boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix);
/** Import blocks from an external file */
bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp = NULL);
/** Initialize a new block tree database + block data on disk */
bool InitBlockIndex(const CChainParams& chainparams);
/** Load the block tree and coins database from disk */
bool LoadBlockIndex();
/** Unload database information */
void UnloadBlockIndex();
/** Process protocol messages received from a given node */
bool ProcessMessages(CNode* pfrom);
/**
* Send queued protocol messages to a given node.
*
* @param[in] pto The node which we are sending messages to.
*/
bool SendMessages(CNode* pto);
/** Run an instance of the script checking thread */
void ThreadScriptCheck();
/** Try to detect Partition (network isolation) attacks against us */
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader, int64_t nPowTargetSpacing);
/** Check whether we are doing an initial block download (synchronizing from disk or network) */
bool IsInitialBlockDownload();
/** Format a string that describes several potential problems detected by the core.
* strFor can have three values:
* - "rpc": get critical warnings, which should put the client in safe mode if non-empty
* - "statusbar": get all warnings
* - "gui": get all warnings, translated (where possible) for GUI
* This function only returns the highest priority warning of the set selected by strFor.
*/
std::string GetWarnings(const std::string& strFor);
/** Retrieve a transaction (from memory pool, or from disk, if possible) */
bool GetTransaction(const uint256 &hash, CTransaction &tx, const Consensus::Params& params, uint256 &hashBlock, bool fAllowSlow = false);
/** Find the best known block, and make it the tip of the block chain */
bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock = NULL);
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
/**
* Prune block and undo files (blk???.dat and undo???.dat) so that the disk space used is less than a user-defined target.
* The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
* space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
* (which in this case means the blockchain must be re-downloaded.)
*
* Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
* Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
* Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
* Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
* The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
* A db flag records the fact that at least some block files have been pruned.
*
* @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned
*/
void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
/**
* Actually unlink the specified files
*/
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune);
/** Create a new block index entry for a given block hash */
CBlockIndex * InsertBlockIndex(uint256 hash);
/** Get statistics from node state */
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
/** Increase a node's misbehavior score. */
void Misbehaving(NodeId nodeid, int howmuch);
/** Flush all state, indexes and buffers to disk. */
void FlushStateToDisk();
/** Prune block files and flush state to disk. */
void PruneAndFlush();
/** (try to) add transaction to memory pool **/
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
bool* pfMissingInputs, bool fOverrideMempoolLimit=false, const CAmount nAbsurdFee=0);
/** Convert CValidationState to a human-readable message for logging */
std::string FormatStateMessage(const CValidationState &state);
/** Get the BIP9 state for a given deployment at the current tip. */
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos);
struct CNodeStateStats {
int nMisbehavior;
int nSyncHeight;
int nCommonHeight;
std::vector<int> vHeightInFlight;
};
struct CDiskTxPos : public CDiskBlockPos
{
unsigned int nTxOffset; // after header
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
READWRITE(*(CDiskBlockPos*)this);
READWRITE(VARINT(nTxOffset));
}
CDiskTxPos(const CDiskBlockPos &blockIn, unsigned int nTxOffsetIn) : CDiskBlockPos(blockIn.nFile, blockIn.nPos), nTxOffset(nTxOffsetIn) {
}
CDiskTxPos() {
SetNull();
}
void SetNull() {
CDiskBlockPos::SetNull();
nTxOffset = 0;
}
};
/**
* Count ECDSA signature operations the old-fashioned (pre-0.6) way
* @return number of sigops this transaction's outputs will produce when spent
* @see CTransaction::FetchInputs
*/
unsigned int GetLegacySigOpCount(const CTransaction& tx);
/**
* Count ECDSA signature operations in pay-to-script-hash inputs.
*
* @param[in] mapInputs Map of previous transactions that have outputs we're spending
* @return maximum number of sigops required to validate this transaction's inputs
* @see CTransaction::FetchInputs
*/
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& mapInputs);
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
* This does not modify the UTXO set. If pvChecks is not NULL, script checks are pushed onto it
* instead of being performed inline.
*/
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &view, bool fScriptChecks,
unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks = NULL);
/** Apply the effects of this transaction on the UTXO set represented by view */
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight);
/** Context-independent validity checks */
bool CheckTransaction(const CTransaction& tx, CValidationState& state);
/**
* Check if transaction is final and can be included in a block with the
* specified height and time. Consensus critical.
*/
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime);
/**
* Check if transaction will be final in the next block to be created.
*
* Calls IsFinalTx() with current block height and appropriate block time.
*
* See consensus/consensus.h for flag definitions.
*/
bool CheckFinalTx(const CTransaction &tx, int flags = -1);
/**
* Test whether the LockPoints height and time are still valid on the current chain
*/
bool TestLockPointValidity(const LockPoints* lp);
/**
* Check if transaction is final per BIP 68 sequence numbers and can be included in a block.
* Consensus critical. Takes as input a list of heights at which tx's inputs (in order) confirmed.
*/
bool SequenceLocks(const CTransaction &tx, int flags, std::vector<int>* prevHeights, const CBlockIndex& block);
/**
* Check if transaction will be BIP 68 final in the next block to be created.
*
* Simulates calling SequenceLocks() with data from the tip of the current active chain.
* Optionally stores in LockPoints the resulting height and time calculated and the hash
* of the block needed for calculation or skips the calculation and uses the LockPoints
* passed in for evaluation.
* The LockPoints should not be considered valid if CheckSequenceLocks returns false.
*
* See consensus/consensus.h for flag definitions.
*/
bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp = NULL, bool useExistingLockPoints = false);
/**
* Closure representing one script verification
* Note that this stores references to the spending transaction
*/
class CScriptCheck
{
private:
CScript scriptPubKey;
const CTransaction *ptxTo;
unsigned int nIn;
unsigned int nFlags;
bool cacheStore;
ScriptError error;
public:
CScriptCheck(): ptxTo(0), nIn(0), nFlags(0), cacheStore(false), error(SCRIPT_ERR_UNKNOWN_ERROR) {}
CScriptCheck(const CCoins& txFromIn, const CTransaction& txToIn, unsigned int nInIn, unsigned int nFlagsIn, bool cacheIn) :
scriptPubKey(txFromIn.vout[txToIn.vin[nInIn].prevout.n].scriptPubKey),
ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn), cacheStore(cacheIn), error(SCRIPT_ERR_UNKNOWN_ERROR) { }
bool operator()();
void swap(CScriptCheck &check) {
scriptPubKey.swap(check.scriptPubKey);
std::swap(ptxTo, check.ptxTo);
std::swap(nIn, check.nIn);
std::swap(nFlags, check.nFlags);
std::swap(cacheStore, check.cacheStore);
std::swap(error, check.error);
}
ScriptError GetScriptError() const { return error; }
};
/** Functions for disk access for blocks */
bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart);
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
/** Functions for validating blocks and updating the block tree */
/** Context-independent validity checks */
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW = true);
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock(). */
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex *pindexPrev);
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex *pindexPrev);
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
* Validity checks that depend on the UTXO set are also done; ConnectBlock()
* can fail if those validity checks fail (among other reasons). */
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& coins, bool fJustCheck = false);
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
 * If pfClean is provided, the operation will try to be tolerant of errors, and *pfClean
 * will be set to true if no problems were found. Otherwise, the return value will be false
 * if problems occur. Note that in any case, coins may be modified. */
bool DisconnectBlock(const CBlock& block, CValidationState& state, const CBlockIndex* pindex, CCoinsViewCache& coins, bool* pfClean = NULL);
/** Check a block is completely valid from start to finish (only works on top of our current best block, with cs_main held) */
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
class CBlockFileInfo
{
public:
unsigned int nBlocks; //! number of blocks stored in file
unsigned int nSize; //! number of used bytes of block file
unsigned int nUndoSize; //! number of used bytes in the undo file
unsigned int nHeightFirst; //! lowest height of block in file
unsigned int nHeightLast; //! highest height of block in file
uint64_t nTimeFirst; //! earliest time of block in file
uint64_t nTimeLast; //! latest time of block in file
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion) {
READWRITE(VARINT(nBlocks));
READWRITE(VARINT(nSize));
READWRITE(VARINT(nUndoSize));
READWRITE(VARINT(nHeightFirst));
READWRITE(VARINT(nHeightLast));
READWRITE(VARINT(nTimeFirst));
READWRITE(VARINT(nTimeLast));
}
void SetNull() {
nBlocks = 0;
nSize = 0;
nUndoSize = 0;
nHeightFirst = 0;
nHeightLast = 0;
nTimeFirst = 0;
nTimeLast = 0;
}
CBlockFileInfo() {
SetNull();
}
std::string ToString() const;
/** update statistics (does not update nSize) */
void AddBlock(unsigned int nHeightIn, uint64_t nTimeIn) {
if (nBlocks==0 || nHeightFirst > nHeightIn)
nHeightFirst = nHeightIn;
if (nBlocks==0 || nTimeFirst > nTimeIn)
nTimeFirst = nTimeIn;
nBlocks++;
if (nHeightIn > nHeightLast)
nHeightLast = nHeightIn;
if (nTimeIn > nTimeLast)
nTimeLast = nTimeIn;
}
};
/** RAII wrapper for VerifyDB: Verify consistency of the block and coin databases */
class CVerifyDB {
public:
CVerifyDB();
~CVerifyDB();
bool VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth);
};
/** Find the last common block between the parameter chain and a locator. */
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator);
/** Mark a block as invalid. */
bool InvalidateBlock(CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex *pindex);
/** Remove invalidity status from a block and its descendants. */
bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex);
/** The currently-connected chain of blocks (protected by cs_main). */
extern CChain chainActive;
/** Global variable that points to the active CCoinsView (protected by cs_main) */
extern CCoinsViewCache *pcoinsTip;
/** Global variable that points to the active block tree (protected by cs_main) */
extern CBlockTreeDB *pblocktree;
/**
 * Return the spend height, which is one more than the height of inputs.GetBestBlock().
* While checking, GetBestBlock() refers to the parent block. (protected by cs_main)
* This is also true for mempool checks.
*/
int GetSpendHeight(const CCoinsViewCache& inputs);
/**
* Determine what nVersion a new block should use.
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);
/** Reject codes greater than or equal to this can be returned by AcceptToMemoryPool
* for transactions, to signal internal conditions. They cannot and should not
* be sent over the P2P network.
*/
static const unsigned int REJECT_INTERNAL = 0x100;
/** Too high fee. Cannot be triggered by P2P transactions */
static const unsigned int REJECT_HIGHFEE = 0x100;
/** Transaction is already known (either in mempool or blockchain) */
static const unsigned int REJECT_ALREADY_KNOWN = 0x101;
/** Transaction conflicts with a transaction already known */
static const unsigned int REJECT_CONFLICT = 0x102;
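// Illustrative sketch: the intent behind REJECT_INTERNAL is that codes at or above it describe
// purely local conditions and must be filtered out before building a "reject" P2P message. The
// state variable is an assumption used only for illustration.
//
//     unsigned int code = state.GetRejectCode();
//     if (code < REJECT_INTERNAL) {
//         // eligible to be relayed in a NetMsgType::REJECT message
//     } else {
//         // internal-only (e.g. REJECT_HIGHFEE, REJECT_ALREADY_KNOWN); never sent to peers
//     }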
#endif // BITCOIN_MAIN_H
diff --git a/src/protocol.cpp b/src/protocol.cpp
index c1c7c0b96b..1ddb65b796 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -1,201 +1,199 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "protocol.h"
#include "util.h"
#include "utilstrencodings.h"
#ifndef WIN32
# include <arpa/inet.h>
#endif
namespace NetMsgType {
const char *VERSION="version";
const char *VERACK="verack";
const char *ADDR="addr";
const char *INV="inv";
const char *GETDATA="getdata";
const char *MERKLEBLOCK="merkleblock";
const char *GETBLOCKS="getblocks";
const char *GETHEADERS="getheaders";
const char *TX="tx";
const char *HEADERS="headers";
const char *BLOCK="block";
const char *GETADDR="getaddr";
const char *MEMPOOL="mempool";
const char *PING="ping";
const char *PONG="pong";
-const char *ALERT="alert";
const char *NOTFOUND="notfound";
const char *FILTERLOAD="filterload";
const char *FILTERADD="filteradd";
const char *FILTERCLEAR="filterclear";
const char *REJECT="reject";
const char *SENDHEADERS="sendheaders";
};
static const char* ppszTypeName[] =
{
"ERROR", // Should never occur
NetMsgType::TX,
NetMsgType::BLOCK,
"filtered block" // Should never occur
};
/** All known message types. Keep this in the same order as the list of
* messages above and in protocol.h.
*/
const static std::string allNetMessageTypes[] = {
NetMsgType::VERSION,
NetMsgType::VERACK,
NetMsgType::ADDR,
NetMsgType::INV,
NetMsgType::GETDATA,
NetMsgType::MERKLEBLOCK,
NetMsgType::GETBLOCKS,
NetMsgType::GETHEADERS,
NetMsgType::TX,
NetMsgType::HEADERS,
NetMsgType::BLOCK,
NetMsgType::GETADDR,
NetMsgType::MEMPOOL,
NetMsgType::PING,
NetMsgType::PONG,
- NetMsgType::ALERT,
NetMsgType::NOTFOUND,
NetMsgType::FILTERLOAD,
NetMsgType::FILTERADD,
NetMsgType::FILTERCLEAR,
NetMsgType::REJECT,
NetMsgType::SENDHEADERS
};
const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes));
CMessageHeader::CMessageHeader(const MessageStartChars& pchMessageStartIn)
{
memcpy(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE);
memset(pchCommand, 0, sizeof(pchCommand));
nMessageSize = -1;
nChecksum = 0;
}
CMessageHeader::CMessageHeader(const MessageStartChars& pchMessageStartIn, const char* pszCommand, unsigned int nMessageSizeIn)
{
memcpy(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE);
memset(pchCommand, 0, sizeof(pchCommand));
strncpy(pchCommand, pszCommand, COMMAND_SIZE);
nMessageSize = nMessageSizeIn;
nChecksum = 0;
}
std::string CMessageHeader::GetCommand() const
{
return std::string(pchCommand, pchCommand + strnlen(pchCommand, COMMAND_SIZE));
}
bool CMessageHeader::IsValid(const MessageStartChars& pchMessageStartIn) const
{
// Check start string
if (memcmp(pchMessageStart, pchMessageStartIn, MESSAGE_START_SIZE) != 0)
return false;
// Check the command string for errors
for (const char* p1 = pchCommand; p1 < pchCommand + COMMAND_SIZE; p1++)
{
if (*p1 == 0)
{
// Must be all zeros after the first zero
for (; p1 < pchCommand + COMMAND_SIZE; p1++)
if (*p1 != 0)
return false;
}
else if (*p1 < ' ' || *p1 > 0x7E)
return false;
}
// Message size
if (nMessageSize > MAX_SIZE)
{
LogPrintf("CMessageHeader::IsValid(): (%s, %u bytes) nMessageSize > MAX_SIZE\n", GetCommand(), nMessageSize);
return false;
}
return true;
}
CAddress::CAddress() : CService()
{
Init();
}
CAddress::CAddress(CService ipIn, uint64_t nServicesIn) : CService(ipIn)
{
Init();
nServices = nServicesIn;
}
void CAddress::Init()
{
nServices = NODE_NETWORK;
nTime = 100000000;
}
CInv::CInv()
{
type = 0;
hash.SetNull();
}
CInv::CInv(int typeIn, const uint256& hashIn)
{
type = typeIn;
hash = hashIn;
}
CInv::CInv(const std::string& strType, const uint256& hashIn)
{
unsigned int i;
for (i = 1; i < ARRAYLEN(ppszTypeName); i++)
{
if (strType == ppszTypeName[i])
{
type = i;
break;
}
}
if (i == ARRAYLEN(ppszTypeName))
throw std::out_of_range(strprintf("CInv::CInv(string, uint256): unknown type '%s'", strType));
hash = hashIn;
}
bool operator<(const CInv& a, const CInv& b)
{
return (a.type < b.type || (a.type == b.type && a.hash < b.hash));
}
bool CInv::IsKnownType() const
{
return (type >= 1 && type < (int)ARRAYLEN(ppszTypeName));
}
const char* CInv::GetCommand() const
{
if (!IsKnownType())
throw std::out_of_range(strprintf("CInv::GetCommand(): type=%d unknown type", type));
return ppszTypeName[type];
}
std::string CInv::ToString() const
{
return strprintf("%s %s", GetCommand(), hash.ToString());
}
const std::vector<std::string> &getAllNetMessageTypes()
{
return allNetMessageTypesVec;
}
diff --git a/src/protocol.h b/src/protocol.h
index c8b8d20ead..5504f213f4 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -1,321 +1,314 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef __cplusplus
#error This header can only be compiled as C++.
#endif
#ifndef BITCOIN_PROTOCOL_H
#define BITCOIN_PROTOCOL_H
#include "netbase.h"
#include "serialize.h"
#include "uint256.h"
#include "version.h"
#include <stdint.h>
#include <string>
#define MESSAGE_START_SIZE 4
/** Message header.
* (4) message start.
* (12) command.
* (4) size.
* (4) checksum.
*/
class CMessageHeader
{
public:
typedef unsigned char MessageStartChars[MESSAGE_START_SIZE];
CMessageHeader(const MessageStartChars& pchMessageStartIn);
CMessageHeader(const MessageStartChars& pchMessageStartIn, const char* pszCommand, unsigned int nMessageSizeIn);
std::string GetCommand() const;
bool IsValid(const MessageStartChars& messageStart) const;
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
{
READWRITE(FLATDATA(pchMessageStart));
READWRITE(FLATDATA(pchCommand));
READWRITE(nMessageSize);
READWRITE(nChecksum);
}
// TODO: make private (improves encapsulation)
public:
enum {
COMMAND_SIZE = 12,
MESSAGE_SIZE_SIZE = sizeof(int),
CHECKSUM_SIZE = sizeof(int),
MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE,
CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE,
HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE
};
char pchMessageStart[MESSAGE_START_SIZE];
char pchCommand[COMMAND_SIZE];
unsigned int nMessageSize;
unsigned int nChecksum;
};
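/**
 * Illustrative note on the layout above, assuming the usual 4-byte int on supported platforms:
 *
 *     MESSAGE_SIZE_OFFSET = 4 (start) + 12 (command)          = 16
 *     CHECKSUM_OFFSET     = 16 + 4 (size field)               = 20
 *     HEADER_SIZE         = 4 + 12 + 4 (size) + 4 (checksum)  = 24 bytes
 *
 * so a peer can read exactly HEADER_SIZE bytes, validate them with IsValid(), and only then
 * read the nMessageSize-byte payload that follows.
 */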
/**
* Bitcoin protocol message types. When adding new message types, don't forget
* to update allNetMessageTypes in protocol.cpp.
*/
namespace NetMsgType {
/**
* The version message provides information about the transmitting node to the
* receiving node at the beginning of a connection.
* @see https://bitcoin.org/en/developer-reference#version
*/
extern const char *VERSION;
/**
* The verack message acknowledges a previously-received version message,
* informing the connecting node that it can begin to send other messages.
* @see https://bitcoin.org/en/developer-reference#verack
*/
extern const char *VERACK;
/**
* The addr (IP address) message relays connection information for peers on the
* network.
* @see https://bitcoin.org/en/developer-reference#addr
*/
extern const char *ADDR;
/**
* The inv message (inventory message) transmits one or more inventories of
* objects known to the transmitting peer.
* @see https://bitcoin.org/en/developer-reference#inv
*/
extern const char *INV;
/**
* The getdata message requests one or more data objects from another node.
* @see https://bitcoin.org/en/developer-reference#getdata
*/
extern const char *GETDATA;
/**
* The merkleblock message is a reply to a getdata message which requested a
* block using the inventory type MSG_MERKLEBLOCK.
* @since protocol version 70001 as described by BIP37.
* @see https://bitcoin.org/en/developer-reference#merkleblock
*/
extern const char *MERKLEBLOCK;
/**
* The getblocks message requests an inv message that provides block header
* hashes starting from a particular point in the block chain.
* @see https://bitcoin.org/en/developer-reference#getblocks
*/
extern const char *GETBLOCKS;
/**
* The getheaders message requests a headers message that provides block
* headers starting from a particular point in the block chain.
* @since protocol version 31800.
* @see https://bitcoin.org/en/developer-reference#getheaders
*/
extern const char *GETHEADERS;
/**
* The tx message transmits a single transaction.
* @see https://bitcoin.org/en/developer-reference#tx
*/
extern const char *TX;
/**
* The headers message sends one or more block headers to a node which
* previously requested certain headers with a getheaders message.
* @since protocol version 31800.
* @see https://bitcoin.org/en/developer-reference#headers
*/
extern const char *HEADERS;
/**
* The block message transmits a single serialized block.
* @see https://bitcoin.org/en/developer-reference#block
*/
extern const char *BLOCK;
/**
* The getaddr message requests an addr message from the receiving node,
* preferably one with lots of IP addresses of other receiving nodes.
* @see https://bitcoin.org/en/developer-reference#getaddr
*/
extern const char *GETADDR;
/**
* The mempool message requests the TXIDs of transactions that the receiving
* node has verified as valid but which have not yet appeared in a block.
* @since protocol version 60002.
* @see https://bitcoin.org/en/developer-reference#mempool
*/
extern const char *MEMPOOL;
/**
* The ping message is sent periodically to help confirm that the receiving
* peer is still connected.
* @see https://bitcoin.org/en/developer-reference#ping
*/
extern const char *PING;
/**
* The pong message replies to a ping message, proving to the pinging node that
* the ponging node is still alive.
* @since protocol version 60001 as described by BIP31.
* @see https://bitcoin.org/en/developer-reference#pong
*/
extern const char *PONG;
-/**
- * The alert message warns nodes of problems that may affect them or the rest
- * of the network.
- * @since protocol version 311.
- * @see https://bitcoin.org/en/developer-reference#alert
- */
-extern const char *ALERT;
/**
* The notfound message is a reply to a getdata message which requested an
* object the receiving node does not have available for relay.
 * @since protocol version 70001.
* @see https://bitcoin.org/en/developer-reference#notfound
*/
extern const char *NOTFOUND;
/**
* The filterload message tells the receiving peer to filter all relayed
* transactions and requested merkle blocks through the provided filter.
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
* @see https://bitcoin.org/en/developer-reference#filterload
*/
extern const char *FILTERLOAD;
/**
* The filteradd message tells the receiving peer to add a single element to a
* previously-set bloom filter, such as a new public key.
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
* @see https://bitcoin.org/en/developer-reference#filteradd
*/
extern const char *FILTERADD;
/**
* The filterclear message tells the receiving peer to remove a previously-set
* bloom filter.
* @since protocol version 70001 as described by BIP37.
* Only available with service bit NODE_BLOOM since protocol version
* 70011 as described by BIP111.
* @see https://bitcoin.org/en/developer-reference#filterclear
*/
extern const char *FILTERCLEAR;
/**
* The reject message informs the receiving node that one of its previous
* messages has been rejected.
* @since protocol version 70002 as described by BIP61.
* @see https://bitcoin.org/en/developer-reference#reject
*/
extern const char *REJECT;
/**
* Indicates that a node prefers to receive new block announcements via a
* "headers" message rather than an "inv".
* @since protocol version 70012 as described by BIP130.
* @see https://bitcoin.org/en/developer-reference#sendheaders
*/
extern const char *SENDHEADERS;
};
/* Get a vector of all valid message types (see above) */
const std::vector<std::string> &getAllNetMessageTypes();
/** nServices flags */
enum {
// NODE_NETWORK means that the node is capable of serving the block chain. It is currently
// set by all Bitcoin Core nodes, and is unset by SPV clients or other peers that just want
// network services but don't provide them.
NODE_NETWORK = (1 << 0),
// NODE_GETUTXO means the node is capable of responding to the getutxo protocol request.
// Bitcoin Core does not support this but a patch set called Bitcoin XT does.
// See BIP 64 for details on how this is implemented.
NODE_GETUTXO = (1 << 1),
// NODE_BLOOM means the node is capable and willing to handle bloom-filtered connections.
// Bitcoin Core nodes used to support this by default, without advertising this bit,
// but no longer do as of protocol version 70011 (= NO_BLOOM_VERSION)
NODE_BLOOM = (1 << 2),
// Bits 24-31 are reserved for temporary experiments. Just pick a bit that
// isn't getting used, or one not being used much, and notify the
// bitcoin-development mailing list. Remember that service bits are just
// unauthenticated advertisements, so your code must be robust against
// collisions and other cases where nodes may be advertising a service they
// do not actually support. Other service bits should be allocated via the
// BIP process.
};
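// Illustrative sketch: service flags form a bit field, so a peer's advertised capabilities are
// tested with bitwise AND. The pfrom pointer is an assumption used only for illustration.
//
//     if (pfrom->nServices & NODE_BLOOM) {
//         // peer accepts bloom-filtered connections (filterload/filteradd/filterclear)
//     }
//     if (!(pfrom->nServices & NODE_NETWORK)) {
//         // peer does not serve the full block chain
//     }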
/** A CService with information about it as peer */
class CAddress : public CService
{
public:
CAddress();
explicit CAddress(CService ipIn, uint64_t nServicesIn = NODE_NETWORK);
void Init();
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
{
if (ser_action.ForRead())
Init();
if (nType & SER_DISK)
READWRITE(nVersion);
if ((nType & SER_DISK) ||
(nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
READWRITE(nTime);
READWRITE(nServices);
READWRITE(*(CService*)this);
}
// TODO: make private (improves encapsulation)
public:
uint64_t nServices;
// disk and network only
unsigned int nTime;
};
/** inv message data */
class CInv
{
public:
CInv();
CInv(int typeIn, const uint256& hashIn);
CInv(const std::string& strType, const uint256& hashIn);
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action, int nType, int nVersion)
{
READWRITE(type);
READWRITE(hash);
}
friend bool operator<(const CInv& a, const CInv& b);
bool IsKnownType() const;
const char* GetCommand() const;
std::string ToString() const;
// TODO: make private (improves encapsulation)
public:
int type;
uint256 hash;
};
enum {
MSG_TX = 1,
MSG_BLOCK,
// Nodes may always request a MSG_FILTERED_BLOCK in a getdata, however,
// MSG_FILTERED_BLOCK should not appear in any invs except as a part of getdata.
MSG_FILTERED_BLOCK,
};
#endif // BITCOIN_PROTOCOL_H
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index fb502b3c81..d3edfedff2 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -1,282 +1,267 @@
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "clientmodel.h"
#include "bantablemodel.h"
#include "guiconstants.h"
#include "peertablemodel.h"
-#include "alert.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "clientversion.h"
#include "net.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "util.h"
#include <stdint.h>
#include <QDebug>
#include <QTimer>
class CBlockIndex;
static const int64_t nClientStartupTime = GetTime();
static int64_t nLastBlockTipUpdateNotification = 0;
ClientModel::ClientModel(OptionsModel *optionsModel, QObject *parent) :
QObject(parent),
optionsModel(optionsModel),
peerTableModel(0),
banTableModel(0),
pollTimer(0)
{
peerTableModel = new PeerTableModel(this);
banTableModel = new BanTableModel(this);
pollTimer = new QTimer(this);
connect(pollTimer, SIGNAL(timeout()), this, SLOT(updateTimer()));
pollTimer->start(MODEL_UPDATE_DELAY);
subscribeToCoreSignals();
}
ClientModel::~ClientModel()
{
unsubscribeFromCoreSignals();
}
int ClientModel::getNumConnections(unsigned int flags) const
{
LOCK(cs_vNodes);
if (flags == CONNECTIONS_ALL) // Shortcut if we want total
return vNodes.size();
int nNum = 0;
BOOST_FOREACH(const CNode* pnode, vNodes)
if (flags & (pnode->fInbound ? CONNECTIONS_IN : CONNECTIONS_OUT))
nNum++;
return nNum;
}
int ClientModel::getNumBlocks() const
{
LOCK(cs_main);
return chainActive.Height();
}
quint64 ClientModel::getTotalBytesRecv() const
{
return CNode::GetTotalBytesRecv();
}
quint64 ClientModel::getTotalBytesSent() const
{
return CNode::GetTotalBytesSent();
}
QDateTime ClientModel::getLastBlockDate() const
{
LOCK(cs_main);
if (chainActive.Tip())
return QDateTime::fromTime_t(chainActive.Tip()->GetBlockTime());
return QDateTime::fromTime_t(Params().GenesisBlock().GetBlockTime()); // Genesis block's time of current network
}
long ClientModel::getMempoolSize() const
{
return mempool.size();
}
size_t ClientModel::getMempoolDynamicUsage() const
{
return mempool.DynamicMemoryUsage();
}
double ClientModel::getVerificationProgress(const CBlockIndex *tipIn) const
{
CBlockIndex *tip = const_cast<CBlockIndex *>(tipIn);
if (!tip)
{
LOCK(cs_main);
tip = chainActive.Tip();
}
return Checkpoints::GuessVerificationProgress(Params().Checkpoints(), tip);
}
void ClientModel::updateTimer()
{
// no locking required at this point
// the following calls will acquire the required lock
Q_EMIT mempoolSizeChanged(getMempoolSize(), getMempoolDynamicUsage());
Q_EMIT bytesChanged(getTotalBytesRecv(), getTotalBytesSent());
}
void ClientModel::updateNumConnections(int numConnections)
{
Q_EMIT numConnectionsChanged(numConnections);
}
-void ClientModel::updateAlert(const QString &hash, int status)
+void ClientModel::updateAlert()
{
- // Show error message notification for new alert
- if(status == CT_NEW)
- {
- uint256 hash_256;
- hash_256.SetHex(hash.toStdString());
- CAlert alert = CAlert::getAlertByHash(hash_256);
- if(!alert.IsNull())
- {
- Q_EMIT message(tr("Network Alert"), QString::fromStdString(alert.strStatusBar), CClientUIInterface::ICON_ERROR);
- }
- }
-
Q_EMIT alertsChanged(getStatusBarWarnings());
}
bool ClientModel::inInitialBlockDownload() const
{
return IsInitialBlockDownload();
}
enum BlockSource ClientModel::getBlockSource() const
{
if (fReindex)
return BLOCK_SOURCE_REINDEX;
else if (fImporting)
return BLOCK_SOURCE_DISK;
else if (getNumConnections() > 0)
return BLOCK_SOURCE_NETWORK;
return BLOCK_SOURCE_NONE;
}
QString ClientModel::getStatusBarWarnings() const
{
return QString::fromStdString(GetWarnings("gui"));
}
OptionsModel *ClientModel::getOptionsModel()
{
return optionsModel;
}
PeerTableModel *ClientModel::getPeerTableModel()
{
return peerTableModel;
}
BanTableModel *ClientModel::getBanTableModel()
{
return banTableModel;
}
QString ClientModel::formatFullVersion() const
{
return QString::fromStdString(FormatFullVersion());
}
QString ClientModel::formatSubVersion() const
{
return QString::fromStdString(strSubVersion);
}
QString ClientModel::formatBuildDate() const
{
return QString::fromStdString(CLIENT_DATE);
}
bool ClientModel::isReleaseVersion() const
{
return CLIENT_VERSION_IS_RELEASE;
}
QString ClientModel::clientName() const
{
return QString::fromStdString(CLIENT_NAME);
}
QString ClientModel::formatClientStartupTime() const
{
return QDateTime::fromTime_t(nClientStartupTime).toString();
}
void ClientModel::updateBanlist()
{
banTableModel->refresh();
}
// Handlers for core signals
static void ShowProgress(ClientModel *clientmodel, const std::string &title, int nProgress)
{
// emits signal "showProgress"
QMetaObject::invokeMethod(clientmodel, "showProgress", Qt::QueuedConnection,
Q_ARG(QString, QString::fromStdString(title)),
Q_ARG(int, nProgress));
}
static void NotifyNumConnectionsChanged(ClientModel *clientmodel, int newNumConnections)
{
// Too noisy: qDebug() << "NotifyNumConnectionsChanged: " + QString::number(newNumConnections);
QMetaObject::invokeMethod(clientmodel, "updateNumConnections", Qt::QueuedConnection,
Q_ARG(int, newNumConnections));
}
-static void NotifyAlertChanged(ClientModel *clientmodel, const uint256 &hash, ChangeType status)
+static void NotifyAlertChanged(ClientModel *clientmodel)
{
- qDebug() << "NotifyAlertChanged: " + QString::fromStdString(hash.GetHex()) + " status=" + QString::number(status);
- QMetaObject::invokeMethod(clientmodel, "updateAlert", Qt::QueuedConnection,
- Q_ARG(QString, QString::fromStdString(hash.GetHex())),
- Q_ARG(int, status));
+ qDebug() << "NotifyAlertChanged";
+ QMetaObject::invokeMethod(clientmodel, "updateAlert", Qt::QueuedConnection);
}
static void BannedListChanged(ClientModel *clientmodel)
{
qDebug() << QString("%1: Requesting update for peer banlist").arg(__func__);
QMetaObject::invokeMethod(clientmodel, "updateBanlist", Qt::QueuedConnection);
}
static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, const CBlockIndex *pIndex)
{
// lock free async UI updates in case we have a new block tip
// during initial sync, only update the UI if the last update
// was > 250ms (MODEL_UPDATE_DELAY) ago
int64_t now = 0;
if (initialSync)
now = GetTimeMillis();
// if we are in-sync, update the UI regardless of last update time
if (!initialSync || now - nLastBlockTipUpdateNotification > MODEL_UPDATE_DELAY) {
// pass an async signal to the UI thread
QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection,
Q_ARG(int, pIndex->nHeight),
Q_ARG(QDateTime, QDateTime::fromTime_t(pIndex->GetBlockTime())),
Q_ARG(double, clientmodel->getVerificationProgress(pIndex)));
nLastBlockTipUpdateNotification = now;
}
}
void ClientModel::subscribeToCoreSignals()
{
// Connect signals to client
uiInterface.ShowProgress.connect(boost::bind(ShowProgress, this, _1, _2));
uiInterface.NotifyNumConnectionsChanged.connect(boost::bind(NotifyNumConnectionsChanged, this, _1));
- uiInterface.NotifyAlertChanged.connect(boost::bind(NotifyAlertChanged, this, _1, _2));
+ uiInterface.NotifyAlertChanged.connect(boost::bind(NotifyAlertChanged, this));
uiInterface.BannedListChanged.connect(boost::bind(BannedListChanged, this));
uiInterface.NotifyBlockTip.connect(boost::bind(BlockTipChanged, this, _1, _2));
}
void ClientModel::unsubscribeFromCoreSignals()
{
// Disconnect signals from client
uiInterface.ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2));
uiInterface.NotifyNumConnectionsChanged.disconnect(boost::bind(NotifyNumConnectionsChanged, this, _1));
- uiInterface.NotifyAlertChanged.disconnect(boost::bind(NotifyAlertChanged, this, _1, _2));
+ uiInterface.NotifyAlertChanged.disconnect(boost::bind(NotifyAlertChanged, this));
uiInterface.BannedListChanged.disconnect(boost::bind(BannedListChanged, this));
uiInterface.NotifyBlockTip.disconnect(boost::bind(BlockTipChanged, this, _1, _2));
}
diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h
index 62c9f71ac7..2fef6131c3 100644
--- a/src/qt/clientmodel.h
+++ b/src/qt/clientmodel.h
@@ -1,110 +1,110 @@
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_QT_CLIENTMODEL_H
#define BITCOIN_QT_CLIENTMODEL_H
#include <QObject>
#include <QDateTime>
class AddressTableModel;
class BanTableModel;
class OptionsModel;
class PeerTableModel;
class TransactionTableModel;
class CWallet;
class CBlockIndex;
QT_BEGIN_NAMESPACE
class QTimer;
QT_END_NAMESPACE
enum BlockSource {
BLOCK_SOURCE_NONE,
BLOCK_SOURCE_REINDEX,
BLOCK_SOURCE_DISK,
BLOCK_SOURCE_NETWORK
};
enum NumConnections {
CONNECTIONS_NONE = 0,
CONNECTIONS_IN = (1U << 0),
CONNECTIONS_OUT = (1U << 1),
CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT),
};
/** Model for Bitcoin network client. */
class ClientModel : public QObject
{
Q_OBJECT
public:
explicit ClientModel(OptionsModel *optionsModel, QObject *parent = 0);
~ClientModel();
OptionsModel *getOptionsModel();
PeerTableModel *getPeerTableModel();
BanTableModel *getBanTableModel();
//! Return number of connections, default is in- and outbound (total)
int getNumConnections(unsigned int flags = CONNECTIONS_ALL) const;
int getNumBlocks() const;
//! Return number of transactions in the mempool
long getMempoolSize() const;
//! Return the dynamic memory usage of the mempool
size_t getMempoolDynamicUsage() const;
quint64 getTotalBytesRecv() const;
quint64 getTotalBytesSent() const;
double getVerificationProgress(const CBlockIndex *tip) const;
QDateTime getLastBlockDate() const;
//! Return true if core is doing initial block download
bool inInitialBlockDownload() const;
//! Return the source of blocks (none, reindex, disk import, or network)
enum BlockSource getBlockSource() const;
//! Return warnings to be displayed in status bar
QString getStatusBarWarnings() const;
QString formatFullVersion() const;
QString formatSubVersion() const;
QString formatBuildDate() const;
bool isReleaseVersion() const;
QString clientName() const;
QString formatClientStartupTime() const;
private:
OptionsModel *optionsModel;
PeerTableModel *peerTableModel;
BanTableModel *banTableModel;
QTimer *pollTimer;
void subscribeToCoreSignals();
void unsubscribeFromCoreSignals();
Q_SIGNALS:
void numConnectionsChanged(int count);
void numBlocksChanged(int count, const QDateTime& blockDate, double nVerificationProgress);
void mempoolSizeChanged(long count, size_t mempoolSizeInBytes);
void alertsChanged(const QString &warnings);
void bytesChanged(quint64 totalBytesIn, quint64 totalBytesOut);
//! Fired when a message should be reported to the user
void message(const QString &title, const QString &message, unsigned int style);
// Show progress dialog e.g. for verifychain
void showProgress(const QString &title, int nProgress);
public Q_SLOTS:
void updateTimer();
void updateNumConnections(int numConnections);
- void updateAlert(const QString &hash, int status);
+ void updateAlert();
void updateBanlist();
};
#endif // BITCOIN_QT_CLIENTMODEL_H
diff --git a/src/test/alert_tests.cpp b/src/test/alert_tests.cpp
index 87d35be412..70f1f12273 100644
--- a/src/test/alert_tests.cpp
+++ b/src/test/alert_tests.cpp
@@ -1,256 +1,79 @@
// Copyright (c) 2013-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
// Unit tests for alert system
-#include "alert.h"
-#include "chain.h"
#include "chainparams.h"
-#include "clientversion.h"
-#include "data/alertTests.raw.h"
#include "main.h" // For PartitionCheck
-#include "serialize.h"
-#include "streams.h"
-#include "utilstrencodings.h"
#include "test/testutil.h"
#include "test/test_bitcoin.h"
-#include <fstream>
-
-#include <boost/filesystem/operations.hpp>
-#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>
-#if 0
-//
-// alertTests contains 7 alerts, generated with this code:
-// (SignAndSave code not shown, alert signing key is secret)
-//
-{
- CAlert alert;
- alert.nRelayUntil = 60;
- alert.nExpiration = 24 * 60 * 60;
- alert.nID = 1;
- alert.nCancel = 0; // cancels previous messages up to this ID number
- alert.nMinVer = 0; // These versions are protocol versions
- alert.nMaxVer = 999001;
- alert.nPriority = 1;
- alert.strComment = "Alert comment";
- alert.strStatusBar = "Alert 1";
-
- SignAndSave(alert, "test/alertTests");
-
- alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
- alert.strStatusBar = "Alert 1 for Satoshi 0.1.0";
- SignAndSave(alert, "test/alertTests");
-
- alert.setSubVer.insert(std::string("/Satoshi:0.2.0/"));
- alert.strStatusBar = "Alert 1 for Satoshi 0.1.0, 0.2.0";
- SignAndSave(alert, "test/alertTests");
-
- alert.setSubVer.clear();
- ++alert.nID;
- alert.nCancel = 1;
- alert.nPriority = 100;
- alert.strStatusBar = "Alert 2, cancels 1";
- SignAndSave(alert, "test/alertTests");
-
- alert.nExpiration += 60;
- ++alert.nID;
- SignAndSave(alert, "test/alertTests");
-
- ++alert.nID;
- alert.nMinVer = 11;
- alert.nMaxVer = 22;
- SignAndSave(alert, "test/alertTests");
-
- ++alert.nID;
- alert.strStatusBar = "Alert 2 for Satoshi 0.1.0";
- alert.setSubVer.insert(std::string("/Satoshi:0.1.0/"));
- SignAndSave(alert, "test/alertTests");
-
- ++alert.nID;
- alert.nMinVer = 0;
- alert.nMaxVer = 999999;
- alert.strStatusBar = "Evil Alert'; /bin/ls; echo '";
- alert.setSubVer.clear();
- SignAndSave(alert, "test/alertTests");
-}
-#endif
-
-struct ReadAlerts : public TestingSetup
-{
- ReadAlerts()
- {
- std::vector<unsigned char> vch(alert_tests::alertTests, alert_tests::alertTests + sizeof(alert_tests::alertTests));
- CDataStream stream(vch, SER_DISK, CLIENT_VERSION);
- try {
- while (!stream.eof())
- {
- CAlert alert;
- stream >> alert;
- alerts.push_back(alert);
- }
- }
- catch (const std::exception&) { }
- }
- ~ReadAlerts() { }
-
- static std::vector<std::string> read_lines(boost::filesystem::path filepath)
- {
- std::vector<std::string> result;
-
- std::ifstream f(filepath.string().c_str());
- std::string line;
- while (std::getline(f,line))
- result.push_back(line);
-
- return result;
- }
-
- std::vector<CAlert> alerts;
-};
-
-BOOST_FIXTURE_TEST_SUITE(Alert_tests, ReadAlerts)
-
-
-BOOST_AUTO_TEST_CASE(AlertApplies)
-{
- SetMockTime(11);
- const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();
+BOOST_FIXTURE_TEST_SUITE(Alert_tests, TestingSetup)
- BOOST_FOREACH(const CAlert& alert, alerts)
- {
- BOOST_CHECK(alert.CheckSignature(alertKey));
- }
-
- BOOST_CHECK(alerts.size() >= 3);
-
- // Matches:
- BOOST_CHECK(alerts[0].AppliesTo(1, ""));
- BOOST_CHECK(alerts[0].AppliesTo(999001, ""));
- BOOST_CHECK(alerts[0].AppliesTo(1, "/Satoshi:11.11.11/"));
-
- BOOST_CHECK(alerts[1].AppliesTo(1, "/Satoshi:0.1.0/"));
- BOOST_CHECK(alerts[1].AppliesTo(999001, "/Satoshi:0.1.0/"));
-
- BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.1.0/"));
- BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.2.0/"));
-
- // Don't match:
- BOOST_CHECK(!alerts[0].AppliesTo(-1, ""));
- BOOST_CHECK(!alerts[0].AppliesTo(999002, ""));
-
- BOOST_CHECK(!alerts[1].AppliesTo(1, ""));
- BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0"));
- BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.1.0"));
- BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0/"));
- BOOST_CHECK(!alerts[1].AppliesTo(-1, "/Satoshi:0.1.0/"));
- BOOST_CHECK(!alerts[1].AppliesTo(999002, "/Satoshi:0.1.0/"));
- BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.2.0/"));
-
- BOOST_CHECK(!alerts[2].AppliesTo(1, "/Satoshi:0.3.0/"));
-
- SetMockTime(0);
-}
-
-
-BOOST_AUTO_TEST_CASE(AlertNotify)
-{
- SetMockTime(11);
- const std::vector<unsigned char>& alertKey = Params(CBaseChainParams::MAIN).AlertKey();
-
- boost::filesystem::path temp = GetTempPath() /
- boost::filesystem::unique_path("alertnotify-%%%%.txt");
-
- mapArgs["-alertnotify"] = std::string("echo %s >> ") + temp.string();
-
- BOOST_FOREACH(CAlert alert, alerts)
- alert.ProcessAlert(alertKey, false);
-
- std::vector<std::string> r = read_lines(temp);
- BOOST_CHECK_EQUAL(r.size(), 4u);
-
-// Windows built-in echo semantics are different than posixy shells. Quotes and
-// whitespace are printed literally.
-
-#ifndef WIN32
- BOOST_CHECK_EQUAL(r[0], "Alert 1");
- BOOST_CHECK_EQUAL(r[1], "Alert 2, cancels 1");
- BOOST_CHECK_EQUAL(r[2], "Alert 2, cancels 1");
- BOOST_CHECK_EQUAL(r[3], "Evil Alert; /bin/ls; echo "); // single-quotes should be removed
-#else
- BOOST_CHECK_EQUAL(r[0], "'Alert 1' ");
- BOOST_CHECK_EQUAL(r[1], "'Alert 2, cancels 1' ");
- BOOST_CHECK_EQUAL(r[2], "'Alert 2, cancels 1' ");
- BOOST_CHECK_EQUAL(r[3], "'Evil Alert; /bin/ls; echo ' ");
-#endif
- boost::filesystem::remove(temp);
-
- SetMockTime(0);
-}
static bool falseFunc() { return false; }
BOOST_AUTO_TEST_CASE(PartitionAlert)
{
// Test PartitionCheck
CCriticalSection csDummy;
CBlockIndex indexDummy[100];
CChainParams& params = Params(CBaseChainParams::MAIN);
int64_t nPowTargetSpacing = params.GetConsensus().nPowTargetSpacing;
// Generate fake blockchain timestamps relative to
// an arbitrary time:
int64_t now = 1427379054;
SetMockTime(now);
for (int i = 0; i < 100; i++)
{
indexDummy[i].phashBlock = NULL;
if (i == 0) indexDummy[i].pprev = NULL;
else indexDummy[i].pprev = &indexDummy[i-1];
indexDummy[i].nHeight = i;
indexDummy[i].nTime = now - (100-i)*nPowTargetSpacing;
// Other members don't matter, the partition check code doesn't
// use them
}
strMiscWarning = "";
// Test 1: chain with blocks every nPowTargetSpacing seconds,
// as normal, no worries:
PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
BOOST_CHECK_MESSAGE(strMiscWarning.empty(), strMiscWarning);
// Test 2: go 3.5 hours without a block, expect a warning:
now += 3*60*60+30*60;
SetMockTime(now);
PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
BOOST_CHECK(!strMiscWarning.empty());
BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
strMiscWarning = "";
// Test 3: test the "partition alerts only go off once per day"
// code:
now += 60*10;
SetMockTime(now);
PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
BOOST_CHECK(strMiscWarning.empty());
// Test 4: get 2.5 times as many blocks as expected:
now += 60*60*24; // Pretend it is a day later
SetMockTime(now);
int64_t quickSpacing = nPowTargetSpacing*2/5;
for (int i = 0; i < 100; i++) // Tweak chain timestamps:
indexDummy[i].nTime = now - (100-i)*quickSpacing;
PartitionCheck(falseFunc, csDummy, &indexDummy[99], nPowTargetSpacing);
BOOST_CHECK(!strMiscWarning.empty());
BOOST_TEST_MESSAGE(std::string("Got alert text: ")+strMiscWarning);
strMiscWarning = "";
SetMockTime(0);
}
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/test/data/alertTests.raw b/src/test/data/alertTests.raw
deleted file mode 100644
index 01f50680b9..0000000000
Binary files a/src/test/data/alertTests.raw and /dev/null differ
diff --git a/src/ui_interface.h b/src/ui_interface.h
index 967d243270..0b51d52e65 100644
--- a/src/ui_interface.h
+++ b/src/ui_interface.h
@@ -1,106 +1,105 @@
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_UI_INTERFACE_H
#define BITCOIN_UI_INTERFACE_H
#include <stdint.h>
#include <string>
#include <boost/signals2/last_value.hpp>
#include <boost/signals2/signal.hpp>
class CBasicKeyStore;
class CWallet;
class uint256;
class CBlockIndex;
/** General change type (added, updated, removed). */
enum ChangeType
{
CT_NEW,
CT_UPDATED,
CT_DELETED
};
/** Signals for UI communication. */
class CClientUIInterface
{
public:
/** Flags for CClientUIInterface::ThreadSafeMessageBox */
enum MessageBoxFlags
{
ICON_INFORMATION = 0,
ICON_WARNING = (1U << 0),
ICON_ERROR = (1U << 1),
/**
* Mask of all available icons in CClientUIInterface::MessageBoxFlags
* This needs to be updated, when icons are changed there!
*/
ICON_MASK = (ICON_INFORMATION | ICON_WARNING | ICON_ERROR),
/** These values are taken from qmessagebox.h "enum StandardButton" to be directly usable */
BTN_OK = 0x00000400U, // QMessageBox::Ok
BTN_YES = 0x00004000U, // QMessageBox::Yes
BTN_NO = 0x00010000U, // QMessageBox::No
BTN_ABORT = 0x00040000U, // QMessageBox::Abort
BTN_RETRY = 0x00080000U, // QMessageBox::Retry
BTN_IGNORE = 0x00100000U, // QMessageBox::Ignore
BTN_CLOSE = 0x00200000U, // QMessageBox::Close
BTN_CANCEL = 0x00400000U, // QMessageBox::Cancel
BTN_DISCARD = 0x00800000U, // QMessageBox::Discard
BTN_HELP = 0x01000000U, // QMessageBox::Help
BTN_APPLY = 0x02000000U, // QMessageBox::Apply
BTN_RESET = 0x04000000U, // QMessageBox::Reset
/**
* Mask of all available buttons in CClientUIInterface::MessageBoxFlags
* This needs to be updated, when buttons are changed there!
*/
BTN_MASK = (BTN_OK | BTN_YES | BTN_NO | BTN_ABORT | BTN_RETRY | BTN_IGNORE |
BTN_CLOSE | BTN_CANCEL | BTN_DISCARD | BTN_HELP | BTN_APPLY | BTN_RESET),
/** Force blocking, modal message box dialog (not just OS notification) */
MODAL = 0x10000000U,
/** Do not print contents of message to debug log */
SECURE = 0x40000000U,
/** Predefined combinations for certain default usage cases */
MSG_INFORMATION = ICON_INFORMATION,
MSG_WARNING = (ICON_WARNING | BTN_OK | MODAL),
MSG_ERROR = (ICON_ERROR | BTN_OK | MODAL)
};
/** Show message box. */
boost::signals2::signal<bool (const std::string& message, const std::string& caption, unsigned int style), boost::signals2::last_value<bool> > ThreadSafeMessageBox;
/** Progress message during initialization. */
boost::signals2::signal<void (const std::string &message)> InitMessage;
/** Number of network connections changed. */
boost::signals2::signal<void (int newNumConnections)> NotifyNumConnectionsChanged;
/**
- * New, updated or cancelled alert.
- * @note called with lock cs_mapAlerts held.
+ * Status bar alerts changed.
*/
- boost::signals2::signal<void (const uint256 &hash, ChangeType status)> NotifyAlertChanged;
+ boost::signals2::signal<void ()> NotifyAlertChanged;
/** A wallet has been loaded. */
boost::signals2::signal<void (CWallet* wallet)> LoadWallet;
/** Show progress e.g. for verifychain */
boost::signals2::signal<void (const std::string &title, int nProgress)> ShowProgress;
/** New block has been accepted */
boost::signals2::signal<void (bool, const CBlockIndex *)> NotifyBlockTip;
/** Banlist did change. */
boost::signals2::signal<void (void)> BannedListChanged;
};
extern CClientUIInterface uiInterface;
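// Illustrative sketch: with the alert system gone, NotifyAlertChanged carries no payload; core
// code fires it whenever the status-bar warnings may have changed, and a UI component connects
// a plain handler that re-queries GetWarnings("gui"). The OnWarningsChanged name is an
// assumption used only for illustration.
//
//     uiInterface.NotifyAlertChanged.connect(&OnWarningsChanged);
//     uiInterface.NotifyAlertChanged();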
#endif // BITCOIN_UI_INTERFACE_H
