diff --git a/qa/rpc-tests/abc-cmdline.py b/qa/rpc-tests/abc-cmdline.py index 9cc6a9fb6..8c1d4ba55 100755 --- a/qa/rpc-tests/abc-cmdline.py +++ b/qa/rpc-tests/abc-cmdline.py @@ -1,103 +1,138 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Exercise the command line functions specific to ABC functionality. Currently: -excessiveblocksize= """ import re from test_framework.test_framework import BitcoinTestFramework from test_framework.util import (start_node, stop_node, assert_equal) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE from test_framework.outputchecker import OutputChecker +MAX_GENERATED_BLOCK_SIZE_ERROR = ( + 'Max generated block size (blockmaxsize) cannot be lower than ' + '1MB or exceed the excessive block size (excessiveblocksize)') class ABC_CmdLine_Test (BitcoinTestFramework): def __init__(self): super(ABC_CmdLine_Test, self).__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): self.nodes = self.setup_nodes() def check_excessive(self, expected_value): 'Check that the excessiveBlockSize is as expected' getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, expected_value) def check_subversion(self, pattern_str): 'Check that the subversion is set as expected' netinfo = self.nodes[0].getnetworkinfo() subversion = netinfo['subversion'] pattern = re.compile(pattern_str) assert(pattern.match(subversion)) def excessiveblocksize_test(self): print("Testing -excessiveblocksize") print(" Set to twice the default, i.e. %d bytes" % (2 * LEGACY_MAX_BLOCK_SIZE)) stop_node(self.nodes[0], 0) self.extra_args = [["-excessiveblocksize=%d" % (2 * LEGACY_MAX_BLOCK_SIZE)]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE) # Check for EB correctness in the subver string self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/") print(" Attempt to set below legacy limit of 1MB - try %d bytes" % LEGACY_MAX_BLOCK_SIZE) outputchecker = OutputChecker() stop_node(self.nodes[0], 0) try: self.extra_args = [["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: assert(outputchecker.contains( 'Error: Excessive block size must be > 1,000,000 bytes (1MB)')) assert_equal('bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError("Must not accept excessiveblocksize" " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE) print(" Attempt to set below blockmaxsize (mining limit)") outputchecker = OutputChecker() try: self.extra_args = [['-blockmaxsize=1500000', '-excessiveblocksize=1300000']] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: - assert(outputchecker.contains( - 'Error: Max generated block size (blockmaxsize) cannot be ' - 'lower than 1MB or exceed the excessive block size (excessiveblocksize)')) + assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) assert_equal('bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError('Must not accept excessiveblocksize' ' below blockmaxsize') + # Make sure that allowsmallgeneratedblocksize doesn't help here + outputchecker = OutputChecker() + try: + self.extra_args = 
[['-blockmaxsize=1500000', + '-excessiveblocksize=1300000', + '-allowsmallgeneratedblocksize']] + self.nodes[0] = start_node(0, self.options.tmpdir, + self.extra_args[0], + stderr_checker=outputchecker) + except Exception as e: + assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + assert_equal('bitcoind exited with status 1 during initialization', str(e)) + else: + raise AssertionError('Must not accept excessiveblocksize' + ' below blockmaxsize') - def run_test(self): + print(" Attempt to set blockmaxsize below 1MB") + outputchecker = OutputChecker() + try: + self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE]] + self.nodes[0] = start_node(0, self.options.tmpdir, + self.extra_args[0], + stderr_checker=outputchecker) + except Exception as e: + assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + assert_equal('bitcoind exited with status 1 during initialization', str(e)) + else: + raise AssertionError('Must not accept blockmaxsize' + ' below 1MB') + outputchecker = OutputChecker() + self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE, + "-allowsmallgeneratedblocksize"]] + self.nodes[0] = start_node(0, self.options.tmpdir, + self.extra_args[0], + stderr_checker=outputchecker) + assert(outputchecker.contains('Warning: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + + + def run_test(self): # Run tests on -excessiveblocksize option self.excessiveblocksize_test() - # Start up default because test framework expects running node - # at end of test. - self.nodes[0] = start_node(0, self.options.tmpdir) if __name__ == '__main__': ABC_CmdLine_Test().main() diff --git a/qa/rpc-tests/maxuploadtarget.py b/qa/rpc-tests/maxuploadtarget.py index a7d6b7ac7..a11039dfd 100755 --- a/qa/rpc-tests/maxuploadtarget.py +++ b/qa/rpc-tests/maxuploadtarget.py @@ -1,237 +1,237 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE ''' Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for the test to send # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() self.block_receive_map = {} def add_connection(self, conn): self.connection = conn self.peer_disconnected = False def on_inv(self, conn, message): pass # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message def on_block(self, conn, message): message.block.calc_sha256() try: self.block_receive_map[message.block.sha256] += 1 except KeyError as e: self.block_receive_map[message.block.sha256] = 1 # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock.
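    # Note: in this vintage of the framework, wait_until polls the predicate
    # until it returns True or the timeout elapses, and signals timeout via
    # its return value rather than by raising; a False result from these
    # helpers means the event (verack/disconnect) never happened.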
def wait_for_verack(self): def veracked(): return self.verack_received return wait_until(veracked, timeout=10) def wait_for_disconnect(self): def disconnected(): return self.peer_disconnected return wait_until(disconnected, timeout=10) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message def on_close(self, conn): self.peer_disconnected = True # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): def received_pong(): return (self.last_pong.nonce == self.ping_counter) self.connection.send_message(msg_ping(nonce=self.ping_counter)) success = wait_until(received_pong, timeout=timeout) self.ping_counter += 1 return success class MaxUploadTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache = [] def setup_network(self): # Start a node with maxuploadtarget of 200 MB (/24h) self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"])) + self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200"])) def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2*60*60*24*7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # test_nodes[0] will only request old blocks # test_nodes[1] will only request new blocks # test_nodes[2] will test resetting the counters test_nodes = [] connections = [] for i in range(3): test_nodes.append(TestNode()) connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) test_nodes[i].add_connection(connections[i]) NetworkThread().start() # Start up network handling in another thread [x.wait_for_verack() for x in test_nodes] # Test logic begins here # Now mine a big block mine_large_block(self.nodes[0], self.utxo_cache) # Store the hash; we'll request this later big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24) # Mine one more block, so that the prior block looks old mine_large_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # test_nodes[0] will test what happens if we just keep requesting the # the same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, big_old_block)) max_bytes_per_day = 200*1024*1024 daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 144MB will be reserved for relaying new blocks, so expect this to # succeed for ~70 tries. 
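        # Rough arithmetic behind success_count, assuming ~1MB large blocks:
        #   max_bytes_per_day   = 200 * 1024 * 1024 ~= 209.7 MB
        #   daily_buffer        = 144 * 1,000,000   =  144.0 MB
        #   max_bytes_available ~= 65.7 MB, i.e. on the order of 65-70
        # full serves of big_old_block before the upload target is exhausted.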
for i in range(success_count): test_nodes[0].send_message(getdata_request) test_nodes[0].sync_with_ping() assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for i in range(3): test_nodes[0].send_message(getdata_request) test_nodes[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) print("Peer 0 disconnected after downloading old block too many times") # Requesting the current block on test_nodes[1] should succeed indefinitely, # even when over the max upload target. # We'll try 200 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(200): test_nodes[1].send_message(getdata_request) test_nodes[1].sync_with_ping() assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1) print("Peer 1 able to repeatedly download new block") # But if test_nodes[1] tries for an old block, it gets disconnected too. getdata_request.inv = [CInv(2, big_old_block)] test_nodes[1].send_message(getdata_request) test_nodes[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) print("Peer 1 disconnected after trying to download old block") print("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and test_nodes[2] should be able to retrieve the old block. self.nodes[0].setmocktime(int(time.time())) test_nodes[2].sync_with_ping() test_nodes[2].send_message(getdata_request) test_nodes[2].sync_with_ping() assert_equal(test_nodes[2].block_receive_map[big_old_block], 1) print("Peer 2 able to download old block") [c.disconnect_node() for c in connections] #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 print("Restarting nodes with -whitelist=127.0.0.1") stop_node(self.nodes[0], 0) - self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) + self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1"]) #recreate/reconnect 3 test nodes test_nodes = [] connections = [] for i in range(3): test_nodes.append(TestNode()) connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) test_nodes[i].add_connection(connections[i]) NetworkThread().start() # Start up network handling in another thread [x.wait_for_verack() for x in test_nodes] #retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): test_nodes[1].send_message(getdata_request) test_nodes[1].sync_with_ping() assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1) getdata_request.inv = [CInv(2, big_old_block)] test_nodes[1].send_message(getdata_request) test_nodes[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist print("Peer 1 still connected after trying to download old block (whitelisted)") [c.disconnect_node() for c in connections] if __name__ == '__main__': MaxUploadTest().main() diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py index 1e6d41918..46f8cec34 100755 --- a/qa/rpc-tests/pruning.py +++ b/qa/rpc-tests/pruning.py @@ -1,454 +1,461 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. # # Test pruning code # ******** # WARNING: # This test uses 4GB of disk space. # This test takes 30 mins or more (up to 2 hours) # ******** from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * +from test_framework.outputchecker import OutputChecker import time import os MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. RESCAN_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 6 # Cache for utxos, as the listunspent may take a long time later in the test self.utxo_cache_0 = [] self.utxo_cache_1 = [] def setup_network(self): self.nodes = [] self.is_network_split = False # Create nodes 0 and 1 to mine - self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) - self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)) + self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-checkblocks=5"], timewait=900)) + self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-checkblocks=5"], timewait=900)) # Create node 2 to test pruning self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900)) self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/" # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) - self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) - self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900)) + self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000"], timewait=900)) + self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000"], timewait=900)) # Create nodes 5 to test wallet in prune mode, but do not connect self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"])) # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[2], 0) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[0], 4) sync_blocks(self.nodes[0:5]) def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_large_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir+"blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") print("Success") print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir)) print("Mining 25 more blocks should cause the first block file to be 
pruned") # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) waitstart = time.time() while os.path.isfile(self.prunedir+"blk00000.dat"): time.sleep(0.1) if time.time() - waitstart > 30: raise AssertionError("blk00000.dat not pruned when it should be") print("Success") usage = calc_usage(self.prunedir) print("Usage should be below target:", usage) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine self.stop_node(0) - self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900) + self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-checkblocks=5"], timewait=900) # Mine 24 blocks in node 1 for i in range(24): if j == 0: mine_large_block(self.nodes[1], self.utxo_cache_1) else: self.nodes[1].generate(1) #tx's already in mempool from previous disconnects # Reorg back with 25 block chain from node 0 for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) # Create connections in the order so both nodes can see the reorg at the same time connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) sync_blocks(self.nodes[0:3]) print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir)) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain # Reboot node 1 to clear its mempool (hopefully make the invalidate faster) # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks) self.stop_node(1) - self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) + self.nodes[1]=start_node(1, self.options.tmpdir, + ["-debug", "-maxreceivebuffer=20000", "-allowsmallgeneratedblocksize", + "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], + timewait=900, stderr_checker=OutputChecker()) height = self.nodes[1].getblockcount() print("Current block height:", height) invalidheight = height-287 badhash = self.nodes[1].getblockhash(invalidheight) print("Invalidating block at height:",invalidheight,badhash) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined-24 block fork on node 1, but thats not what we want # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(invalidheight - 1) assert(self.nodes[1].getblockcount() == invalidheight - 1) print("New best height", 
self.nodes[1].getblockcount()) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) - self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900) + self.nodes[1]=start_node(1, self.options.tmpdir, + ["-debug", "-maxreceivebuffer=20000", "-allowsmallgeneratedblocksize", + "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], + timewait=900, stderr_checker=OutputChecker()) print("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) print("Reconnect nodes") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[2], 1) sync_blocks(self.nodes[0:3], timeout=120) print("Verify height on node 2:",self.nodes[2].getblockcount()) print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir)) print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts. self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) print("Usage should be below target:", usage) if (usage > 550): raise AssertionError("Pruning target not being met") return invalidheight,badhash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away try: self.nodes[2].getblock(self.forkhash) raise AssertionError("Old block wasn't pruned so can't test redownload") except JSONRPCException as e: print("Will need to redownload block",self.forkheight) # Verify that we have enough history to reorg back to the fork point # Although this is more than 288 blocks, because this chain was written more recently # and only its other 299 small and 220 large block are in the block files after it, # its expected to still be retained self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to the original chain created in # create_chain_with_stale_blocks because it doesn't know of any peer thats on that chain from which to # redownload its missing blocks. # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain # because it has all the block data. # However it must mine enough blocks to have a more work chain than the reorg_test chain in order # to trigger node 2's block download logic. # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. 
Blocks needed:", blocks_to_mine) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) if time.time() - waitstart > 900: raise AssertionError("Node 2 didn't reorg to proper height") assert(self.nodes[2].getbestblockhash() == goalbesthash) # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900) assert_equal(node.getblockcount(), 995) assert_raises_message(JSONRPCException, "not in prune mode", node.pruneblockchain, 500) self.stop_node(node_number) # now re-start in manual pruning mode node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900) assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value. When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000) assert_raises_message(JSONRPCException, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_message(JSONRPCException, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError("blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError("blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError("blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError("blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError("blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in blk00002.dat. 
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) if not has_block(2): raise AssertionError("blk00002.dat is still there, should be pruned by now") # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) if has_block(2): raise AssertionError("blk00002.dat is still there, should be pruned by now") if has_block(3): raise AssertionError("blk00003.dat is still there, should be pruned by now") # stop node, start back up with auto-prune at 550MB, make sure still runs self.stop_node(node_number) self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900) print("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape print("Stop and start pruning node to trigger wallet rescan") try: self.stop_node(2) start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"]) print("Success") except Exception as detail: raise AssertionError("Wallet test: unable to re-start the pruning node") # check that wallet loads loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. print ("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], 5) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) try: self.stop_node(5) #stop and start to trigger rescan start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"]) print ("Success") except Exception as detail: raise AssertionError("Wallet test: unable to re-start node5") def run_test(self): print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") print("Mining a big blockchain of 995 blocks") self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) print("Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) print("Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() #1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) print("Check that we can survive a 288 block reorg still") (self.forkheight,self.forkhash) = self.reorg_test() #(1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. 
# # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) print("Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. # # N1 doesn't change because 1033 on main chain (*) is invalid print("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) print("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) print("Test wallet re-scan") self.wallet_test() print("Done") if __name__ == '__main__': PruneTest().main() diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py index 2c56f954a..f89d8efb2 100755 --- a/qa/rpc-tests/smartfees.py +++ b/qa/rpc-tests/smartfees.py @@ -1,267 +1,270 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test fee estimation code # from collections import OrderedDict from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * +from test_framework.outputchecker import OutputChecker # Construct 2 trivial P2SH's and the ScriptSigs that spend them # So we can create many many transactions without needing to spend # time signing. P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP" P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP" # Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2 # 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or "OP_2 OP_DROP" SCRIPT_SIG = ["0451025175", "0451025275"] def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): ''' Create and send a transaction with a random fee. The transaction pays to a trivial P2SH script, and assumes that its inputs are of the same form. The function takes a list of confirmed outputs and unconfirmed outputs and attempts to use the confirmed list first for its inputs. It adds the newly created outputs to the unconfirmed list. Returns (raw transaction, fee) ''' # It's best to exponentially distribute our random fees # because the buckets are exponentially spaced. 
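    # (1.1892 ~= 2**0.25, so 28 quarter-doubling steps span a factor of
    # 2**7 = 128, matching the 1-128 range noted below.)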
# Exponentially distributed from 1-128 * fee_increment rand_fee = float(fee_increment)*(1.1892**random.randint(0,28)) # Total fee ranges from min_fee to min_fee + 127*fee_increment fee = min_fee - fee_increment + satoshi_round(rand_fee) inputs = [] total_in = Decimal("0.00000000") while total_in <= (amount + fee) and len(conflist) > 0: t = conflist.pop(0) total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) if total_in <= amount + fee: while total_in <= (amount + fee) and len(unconflist) > 0: t = unconflist.pop(0) total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"]} ) if total_in <= amount + fee: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in)) outputs = {} outputs = OrderedDict([(P2SH_1, total_in - amount - fee), (P2SH_2, amount)]) rawtx = from_node.createrawtransaction(inputs, outputs) # createrawtransaction constructs a transaction that is ready to be signed. # These transactions don't need to be signed, but we still have to insert the ScriptSig # that will satisfy the ScriptPubKey. completetx = rawtx[0:10] inputnum = 0 for inp in inputs: completetx += rawtx[10+82*inputnum:82+82*inputnum] completetx += SCRIPT_SIG[inp["vout"]] completetx += rawtx[84+82*inputnum:92+82*inputnum] inputnum += 1 completetx += rawtx[10+82*inputnum:] txid = from_node.sendrawtransaction(completetx, True) unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) return (completetx, fee) def split_inputs(from_node, txins, txouts, initial_split = False): ''' We need to generate a lot of very small inputs so we can generate a ton of transactions and they will have low priority. This function takes an input from txins, and creates and sends a transaction which splits the value into 2 outputs which are appended to txouts. ''' prevtxout = txins.pop() inputs = [] inputs.append({ "txid" : prevtxout["txid"], "vout" : prevtxout["vout"] }) half_change = satoshi_round(prevtxout["amount"]/2) rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") outputs = OrderedDict([(P2SH_1, half_change), (P2SH_2, rem_change)]) rawtx = from_node.createrawtransaction(inputs, outputs) # If this is the initial split we actually need to sign the transaction # Otherwise we just need to insert the property ScriptSig if (initial_split) : completetx = from_node.signrawtransaction(rawtx)["hex"] else : completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:] txid = from_node.sendrawtransaction(completetx, True) txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) def check_estimates(node, fees_seen, max_invalid, print_estimates = True): ''' This function calls estimatefee and verifies that the estimates meet certain invariants. 
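    Invariants checked below: every estimate lies within the range of fees
    actually observed, estimates are monotonically non-increasing as the
    confirmation count grows, and invalid (-1) estimates may only appear at
    the low confirmation counts, never above a valid one.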
''' all_estimates = [ node.estimatefee(i) for i in range(1,26) ] if print_estimates: print([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]]) delta = 1.0e-6 # account for rounding error last_e = max(fees_seen) for e in [x for x in all_estimates if x >= 0]: # Estimates should be within the bounds of what transactions fees actually were: if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen): raise AssertionError("Estimated fee (%f) out of range (%f,%f)" %(float(e), min(fees_seen), max(fees_seen))) # Estimates should be monotonically decreasing if float(e)-delta > last_e: raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" %(float(e),float(last_e))) last_e = e valid_estimate = False invalid_estimates = 0 for i,e in enumerate(all_estimates): # estimate is for i+1 if e >= 0: valid_estimate = True # estimatesmartfee should return the same result assert_equal(node.estimatesmartfee(i+1)["feerate"], e) else: invalid_estimates += 1 # estimatesmartfee should still be valid approx_estimate = node.estimatesmartfee(i+1)["feerate"] answer_found = node.estimatesmartfee(i+1)["blocks"] assert(approx_estimate > 0) assert(answer_found > i+1) # Once we're at a high enough confirmation count that we can give an estimate # We should have estimates for all higher confirmation counts if valid_estimate: raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate") # Check on the expected number of different confirmation counts # that we might not have valid estimates for if invalid_estimates > max_invalid: raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) return all_estimates class EstimateFeeTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False def setup_network(self): ''' We'll setup the network to have 3 nodes that all mine with different parameters. But first we need to use one node to create a lot of small low priority outputs which we will use to generate our transactions. ''' self.nodes = [] # Use node0 to mine blocks for input splitting self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-whitelist=127.0.0.1"])) print("This test is time consuming, please be patient") print("Splitting inputs to small size so we can generate low priority tx's") self.txouts = [] self.txouts2 = [] # Split a coinbase into two transaction puzzle outputs split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True) # Mine while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) # Repeatedly split those 2 outputs, doubling twice for each rep # Use txouts to monitor the available utxo, since these won't be tracked in wallet reps = 0 while (reps < 5): #Double txouts to txouts2 while (len(self.txouts)>0): split_inputs(self.nodes[0], self.txouts, self.txouts2) while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) #Double txouts2 to txouts while (len(self.txouts2)>0): split_inputs(self.nodes[0], self.txouts2, self.txouts) while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) reps += 1 print("Finished splitting") # Now we can connect the other nodes, didn't want to connect them earlier # so the estimates would not be affected by the splitting transactions # Node1 mines small blocks but that are bigger than the expected transaction rate, # and allows free transactions. 
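        # A -blockmaxsize under 1MB is now rejected at startup (see the
        # abc-cmdline test earlier in this diff), so nodes 1 and 2 also pass
        # -allowsmallgeneratedblocksize and attach an OutputChecker,
        # presumably to capture the startup warning printed to stderr.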
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, # (17k is room enough for 110 or so transactions) self.nodes.append(start_node(1, self.options.tmpdir, ["-blockprioritysize=1500", "-blockmaxsize=17000", - "-maxorphantx=1000", "-debug=estimatefee"])) + "-maxorphantx=1000", "-debug=estimatefee", + "-allowsmallgeneratedblocksize"], + stderr_checker=OutputChecker())) connect_nodes(self.nodes[1], 0) # Node2 is a stingy miner, that # produces too small blocks (room for only 55 or so transactions) - node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000"] + node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000", "-allowsmallgeneratedblocksize"] - self.nodes.append(start_node(2, self.options.tmpdir, node2args)) + self.nodes.append(start_node(2, self.options.tmpdir, node2args, stderr_checker=OutputChecker())) connect_nodes(self.nodes[0], 2) connect_nodes(self.nodes[2], 1) self.is_network_split = False self.sync_all() def transact_and_mine(self, numblocks, mining_node): min_fee = Decimal("0.00001") # We will now mine numblocks blocks generating on average 100 transactions between each block # We shuffle our confirmed txout set before each set of transactions # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible # resorting to tx's that depend on the mempool when those run out for i in range(numblocks): random.shuffle(self.confutxo) for j in range(random.randrange(100-50,100+50)): from_index = random.randint(1,2) (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, self.memutxo, Decimal("0.005"), min_fee, min_fee) tx_kbytes = (len(txhex) // 2) / 1000.0 self.fees_per_kb.append(float(fee)/tx_kbytes) sync_mempools(self.nodes[0:3], wait=.1) mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"] sync_blocks(self.nodes[0:3], wait=.1) # update which txouts are confirmed newmem = [] for utx in self.memutxo: if utx["txid"] in mined: self.confutxo.append(utx) else: newmem.append(utx) self.memutxo = newmem def run_test(self): self.fees_per_kb = [] self.memutxo = [] self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting print("Will output estimates for 1/2/3/6/15/25 blocks") for i in range(2): print("Creating transactions and mining them with a block size that can't keep up") # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine self.transact_and_mine(10, self.nodes[2]) check_estimates(self.nodes[1], self.fees_per_kb, 14) print("Creating transactions and mining them at a block size that is just big enough") # Generate transactions while mining 10 more blocks, this time with node1 # which mines blocks with capacity just above the rate that transactions are being created self.transact_and_mine(10, self.nodes[1]) check_estimates(self.nodes[1], self.fees_per_kb, 2) # Finish by mining a normal-sized block: while len(self.nodes[1].getrawmempool()) > 0: self.nodes[1].generate(1) sync_blocks(self.nodes[0:3], wait=.1) print("Final estimates after emptying mempools") check_estimates(self.nodes[1], self.fees_per_kb, 2) if __name__ == '__main__': EstimateFeeTest().main() diff --git a/src/init.cpp b/src/init.cpp index 9d4122518..bd33fd1ed 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1,2165 +1,2174 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or 
http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "init.h" #include "addrman.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "compat/sanity.h" #include "config.h" #include "consensus/validation.h" #include "httprpc.h" #include "httpserver.h" #include "key.h" #include "miner.h" #include "net.h" #include "net_processing.h" #include "netbase.h" #include "policy/policy.h" #include "rpc/register.h" #include "rpc/server.h" #include "scheduler.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "torcontrol.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "validation.h" #include "validationinterface.h" #ifdef ENABLE_WALLET #include "wallet/rpcdump.h" #include "wallet/wallet.h" #endif #include "warnings.h" #include #include #include #ifndef WIN32 #include #endif #include #include #include #include #include #include #include #include #include #include #if ENABLE_ZMQ #include "zmq/zmqnotificationinterface.h" #endif bool fFeeEstimatesInitialized = false; static const bool DEFAULT_PROXYRANDOMIZE = true; static const bool DEFAULT_REST_ENABLE = false; static const bool DEFAULT_DISABLE_SAFEMODE = false; static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr g_connman; std::unique_ptr peerLogic; #if ENABLE_ZMQ static CZMQNotificationInterface *pzmqNotificationInterface = NULL; #endif #ifdef WIN32 // Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing // block files don't count towards the fd_set size limit anyway. #define MIN_CORE_FILEDESCRIPTORS 0 #else #define MIN_CORE_FILEDESCRIPTORS 150 #endif /** Used to pass flags to the Bind() function */ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), BF_WHITELIST = (1U << 2), }; static const char *FEE_ESTIMATES_FILENAME = "fee_estimates.dat"; ////////////////////////////////////////////////////////////////////////////// // // Shutdown // // // Thread management and startup/shutdown: // // The network-processing threads are all part of a thread group created by // AppInit() or the Qt main() function. // // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets // fRequestShutdown, which triggers the DetectShutdownThread(), which interrupts // the main thread group. DetectShutdownThread() then exits, which causes // AppInit() to continue (it .joins the shutdown thread). Shutdown() is then // called to clean up database connections, and stop other threads that should // only be stopped after the main network-processing threads have exited. // // Note that if running -daemon the parent process returns from AppInit2 before // adding any threads to the threadGroup, so .join_all() returns immediately and // the parent exits from main(). // // Shutdown for Qt is very similar, only it uses a QTimer to detect // fRequestShutdown getting set, and then does the normal Qt shutdown thing. // std::atomic fRequestShutdown(false); std::atomic fDumpMempoolLater(false); void StartShutdown() { fRequestShutdown = true; } bool ShutdownRequested() { return fRequestShutdown; } /** * This is a minimally invasive approach to shutdown on LevelDB read errors from * the chainstate, while keeping user interface out of the common library, which * is shared between bitcoind, and bitcoin-qt and non-server tools. 
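 * On a failed chainstate read it reports the error to the UI and calls
 * abort() instead of returning false, because a false return would be
 * misread by callers as "entry not found" rather than an I/O failure.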
*/ class CCoinsViewErrorCatcher : public CCoinsViewBacked { public: CCoinsViewErrorCatcher(CCoinsView *view) : CCoinsViewBacked(view) {} bool GetCoins(const uint256 &txid, CCoins &coins) const { try { return CCoinsViewBacked::GetCoins(txid, coins); } catch (const std::runtime_error &e) { uiInterface.ThreadSafeMessageBox( _("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR); LogPrintf("Error reading from database: %s\n", e.what()); // Starting the shutdown sequence and returning false to the caller // would be interpreted as 'entry not found' (as opposed to unable // to read data), and could lead to invalid interpretation. Just // exit immediately, as we can't continue anyway, and all writes // should be atomic. abort(); } } // Writes do not need similar protection, as failure to write is handled by // the caller. }; static CCoinsViewDB *pcoinsdbview = NULL; static CCoinsViewErrorCatcher *pcoinscatcher = NULL; static std::unique_ptr globalVerifyHandle; void Interrupt(boost::thread_group &threadGroup) { InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); InterruptREST(); InterruptTorControl(); if (g_connman) g_connman->Interrupt(); threadGroup.interrupt_all(); } void Shutdown() { LogPrintf("%s: In progress...\n", __func__); static CCriticalSection cs_Shutdown; TRY_LOCK(cs_Shutdown, lockShutdown); if (!lockShutdown) return; /// Note: Shutdown() must be able to handle cases in which AppInit2() failed /// part of the way, for example if the data directory was found to be /// locked. Be sure that anything that writes files or flushes caches only /// does this if the respective module was initialized. RenameThread("bitcoin-shutoff"); mempool.AddTransactionsUpdated(1); StopHTTPRPC(); StopREST(); StopRPC(); StopHTTPServer(); #ifdef ENABLE_WALLET if (pwalletMain) pwalletMain->Flush(false); #endif MapPort(false); UnregisterValidationInterface(peerLogic.get()); peerLogic.reset(); g_connman.reset(); StopTorControl(); UnregisterNodeSignals(GetNodeSignals()); if (fDumpMempoolLater) DumpMempool(); if (fFeeEstimatesInitialized) { boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_fileout(fopen(est_path.string().c_str(), "wb"), SER_DISK, CLIENT_VERSION); if (!est_fileout.IsNull()) mempool.WriteFeeEstimates(est_fileout); else LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string()); fFeeEstimatesInitialized = false; } { LOCK(cs_main); if (pcoinsTip != NULL) { FlushStateToDisk(); } delete pcoinsTip; pcoinsTip = NULL; delete pcoinscatcher; pcoinscatcher = NULL; delete pcoinsdbview; pcoinsdbview = NULL; delete pblocktree; pblocktree = NULL; } #ifdef ENABLE_WALLET if (pwalletMain) pwalletMain->Flush(true); #endif #if ENABLE_ZMQ if (pzmqNotificationInterface) { UnregisterValidationInterface(pzmqNotificationInterface); delete pzmqNotificationInterface; pzmqNotificationInterface = NULL; } #endif #ifndef WIN32 try { boost::filesystem::remove(GetPidFile()); } catch (const boost::filesystem::filesystem_error &e) { LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what()); } #endif UnregisterAllValidationInterfaces(); #ifdef ENABLE_WALLET delete pwalletMain; pwalletMain = NULL; #endif globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); } /** * Signal handlers are very limited in what they are allowed to do, so: */ void HandleSIGTERM(int) { fRequestShutdown = true; } void HandleSIGHUP(int) { fReopenDebugLog = true; } bool static Bind(CConnman &connman, const CService &addr, unsigned int 
flags) { if (!(flags & BF_EXPLICIT) && IsLimited(addr)) return false; std::string strError; if (!connman.BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) { if (flags & BF_REPORT_ERROR) return InitError(strError); return false; } return true; } void OnRPCStarted() { uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange); } void OnRPCStopped() { uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange); RPCNotifyBlockChange(false, nullptr); cvBlockChange.notify_all(); LogPrint("rpc", "RPC stopped.\n"); } void OnRPCPreCommand(const CRPCCommand &cmd) { // Observe safe mode. std::string strWarning = GetWarnings("rpc"); if (strWarning != "" && !GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE) && !cmd.okSafeMode) throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, std::string("Safe mode: ") + strWarning); } std::string HelpMessage(HelpMessageMode mode) { const bool showDebug = GetBoolArg("-help-debug", false); // When adding new options to the categories, please keep and ensure // alphabetical ordering. Do not translate _(...) -help-debug options, Many // technical terms, and only a very small audience, so is unnecessary stress // to translators. std::string strUsage = HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("Print this help message and exit")); strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt( "-alertnotify=", _("Execute command when a relevant alert is received or we see a " "really long fork (%s in cmd is replaced by message)")); strUsage += HelpMessageOpt("-blocknotify=", _("Execute command when the best block changes " "(%s in cmd is replaced by block hash)")); if (showDebug) strUsage += HelpMessageOpt( "-blocksonly", strprintf( _("Whether to operate in a blocks only mode (default: %u)"), DEFAULT_BLOCKSONLY)); strUsage += HelpMessageOpt( "-assumevalid=", strprintf(_("If this block is in the chain assume that it and its " "ancestors are valid and potentially skip their script " "verification (0 to verify all, default: %s, testnet: %s)"), Params(CBaseChainParams::MAIN) .GetConsensus() .defaultAssumeValid.GetHex(), Params(CBaseChainParams::TESTNET) .GetConsensus() .defaultAssumeValid.GetHex())); strUsage += HelpMessageOpt( "-conf=", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); if (mode == HMM_BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt( "-daemon", _("Run in the background as a daemon and accept commands")); #endif } strUsage += HelpMessageOpt("-datadir=", _("Specify data directory")); strUsage += HelpMessageOpt( "-dbcache=", strprintf( _("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache)); if (showDebug) strUsage += HelpMessageOpt( "-feefilter", strprintf("Tell other nodes to filter invs to us by " "our mempool min fee (default: %u)", DEFAULT_FEEFILTER)); strUsage += HelpMessageOpt( "-loadblock=", _("Imports blocks from external blk000??.dat file on startup")); strUsage += HelpMessageOpt( "-maxorphantx=", strprintf(_("Keep at most unconnectable " "transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS)); strUsage += HelpMessageOpt("-maxmempool=", strprintf(_("Keep the transaction memory pool " "below megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE)); strUsage += HelpMessageOpt("-mempoolexpiry=", strprintf(_("Do not keep transactions in the mempool " "longer than hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY)); strUsage += HelpMessageOpt( "-blockreconstructionextratxn=", 
strprintf(_("Extra transactions to keep in memory for compact block " "reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); strUsage += HelpMessageOpt( "-par=", strprintf(_("Set the number of script verification threads (%u to %d, " "0 = auto, <0 = leave that many cores free, default: %d)"), -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS)); #ifndef WIN32 strUsage += HelpMessageOpt( "-pid=", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME)); #endif strUsage += HelpMessageOpt( "-prune=", strprintf( _("Reduce storage requirements by enabling pruning (deleting) of " "old blocks. This allows the pruneblockchain RPC to be called to " "delete specific blocks, and enables automatic pruning of old " "blocks if a target size in MiB is provided. This mode is " "incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the " "entire blockchain. " "(default: 0 = disable pruning blocks, 1 = allow manual pruning " "via RPC, >%u = automatically prune block files to stay under " "the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); strUsage += HelpMessageOpt( "-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks")); strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from " "the blk*.dat files on disk")); #ifndef WIN32 strUsage += HelpMessageOpt( "-sysperms", _("Create new files with system default permissions, instead of umask " "077 (only effective with disabled wallet functionality)")); #endif strUsage += HelpMessageOpt( "-txindex", strprintf(_("Maintain a full transaction index, used by " "the getrawtransaction rpc call (default: %u)"), DEFAULT_TXINDEX)); strUsage += HelpMessageGroup(_("Connection options:")); strUsage += HelpMessageOpt( "-addnode=", _("Add a node to connect to and attempt to keep the connection open")); strUsage += HelpMessageOpt( "-banscore=", strprintf( _("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD)); strUsage += HelpMessageOpt( "-bantime=", strprintf(_("Number of seconds to keep misbehaving " "peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME)); strUsage += HelpMessageOpt("-bind=", _("Bind to given address and always listen on " "it. 
Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt("-connect=", _("Connect only to the specified node(s); -noconnect or " "-connect=0 alone to disable automatic connections")); strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when " "listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt( "-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %u)"), DEFAULT_NAME_LOOKUP)); strUsage += HelpMessageOpt( "-dnsseed", _("Query for peer addresses via DNS lookup, if low on " "addresses (default: 1 unless -connect/-noconnect)")); strUsage += HelpMessageOpt("-externalip=", _("Specify your own public address")); strUsage += HelpMessageOpt( "-forcednsseed", strprintf( _("Always query for peer addresses via DNS lookup (default: %u)"), DEFAULT_FORCEDNSSEED)); strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: " "1 if no -proxy or -connect/-noconnect)")); strUsage += HelpMessageOpt( "-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION)); strUsage += HelpMessageOpt( "-maxconnections=", strprintf(_("Maintain at most connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS)); strUsage += HelpMessageOpt("-maxreceivebuffer=", strprintf(_("Maximum per-connection receive buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER)); strUsage += HelpMessageOpt( "-maxsendbuffer=", strprintf(_("Maximum per-connection send buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER)); strUsage += HelpMessageOpt( "-maxtimeadjustment", strprintf(_("Maximum allowed median peer time offset adjustment. Local " "perspective of time may be influenced by peers forward or " "backward by this amount. (default: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT)); strUsage += HelpMessageOpt("-onion=", strprintf(_("Use separate SOCKS5 proxy to reach peers " "via Tor hidden services (default: %s)"), "-proxy")); strUsage += HelpMessageOpt( "-onlynet=", _("Only connect to nodes in network (ipv4, ipv6 or onion)")); strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %u)"), DEFAULT_PERMIT_BAREMULTISIG)); strUsage += HelpMessageOpt( "-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom " "filters (default: %u)"), DEFAULT_PEERBLOOMFILTERS)); strUsage += HelpMessageOpt( "-port=", strprintf( _("Listen for connections on (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort())); strUsage += HelpMessageOpt("-proxy=", _("Connect through SOCKS5 proxy")); strUsage += HelpMessageOpt( "-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. 
This " "enables Tor stream isolation (default: %u)"), DEFAULT_PROXYRANDOMIZE)); strUsage += HelpMessageOpt( "-seednode=", _("Connect to a node to retrieve peer addresses, and disconnect")); strUsage += HelpMessageOpt( "-timeout=", strprintf(_("Specify connection timeout in " "milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT)); strUsage += HelpMessageOpt("-torcontrol=:", strprintf(_("Tor control port to use if onion " "listening enabled (default: %s)"), DEFAULT_TOR_CONTROL)); strUsage += HelpMessageOpt("-torpassword=", _("Tor control port password (default: empty)")); #ifdef USE_UPNP #if USE_UPNP strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port " "(default: 1 when listening and no -proxy)")); #else strUsage += HelpMessageOpt( "-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0)); #endif #endif strUsage += HelpMessageOpt("-whitebind=", _("Bind to given address and whitelist peers connecting " "to it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt( "-whitelist=", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) " "or CIDR notated network (e.g. 1.2.3.0/24). Can be specified " "multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their " "transactions are always relayed, even if they are already " "in the mempool, useful e.g. for a gateway")); strUsage += HelpMessageOpt( "-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted " "peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageOpt( "-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even " "if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY)); strUsage += HelpMessageOpt( "-maxuploadtarget=", strprintf(_("Tries to keep outbound traffic under the given target (in " "MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET)); #ifdef ENABLE_WALLET strUsage += CWallet::GetWalletHelpString(showDebug); #endif #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); strUsage += HelpMessageOpt("-zmqpubhashblock=
", _("Enable publish hash block in
")); strUsage += HelpMessageOpt("-zmqpubhashtx=
", _("Enable publish hash transaction in
")); strUsage += HelpMessageOpt("-zmqpubrawblock=
", _("Enable publish raw block in
")); strUsage += HelpMessageOpt("-zmqpubrawtx=
", _("Enable publish raw transaction in
")); #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); strUsage += HelpMessageOpt("-uacomment=", _("Append comment to the user agent string")); if (showDebug) { strUsage += HelpMessageOpt( "-checkblocks=", strprintf( _("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=", strprintf(_("How thorough the block verification of " "-checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); strUsage += HelpMessageOpt( "-checkblockindex", strprintf( "Do a full consistency check for mapBlockIndex, " "setBlockIndexCandidates, chainActive and mapBlocksUnlinked " "occasionally. Also sets -checkmempool (default: %u)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkmempool=", strprintf( "Run checks every transactions (default: %u)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkpoints", strprintf("Disable expensive verification for " "known chain history (default: %u)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt( "-disablesafemode", strprintf("Disable safemode, override a real " "safe mode event (default: %u)", DEFAULT_DISABLE_SAFEMODE)); strUsage += HelpMessageOpt( "-testsafemode", strprintf("Force safe mode (default: %u)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=", "Randomly drop 1 of every network messages"); strUsage += HelpMessageOpt("-fuzzmessagestest=", "Randomly fuzz 1 of every network messages"); strUsage += HelpMessageOpt( "-stopafterblockimport", strprintf( "Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt( "-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool " "ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT)); strUsage += HelpMessageOpt("-limitancestorsize=", strprintf("Do not accept transactions whose size " "with all in-mempool ancestors exceeds " " kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have " " or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantsize=", strprintf("Do not accept transactions if any ancestor would have " "more than kilobytes of in-mempool descendants " "(default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT)); strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified " "BIP9 deployment (regtest-only)"); } std::string debugCategories = "addrman, alert, bench, cmpctblock, coindb, db, http, libevent, lock, " "mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, " "selectcoins, tor, zmq"; // Don't translate these and qt below if (mode == HMM_BITCOIN_QT) debugCategories += ", qt"; strUsage += HelpMessageOpt( "-debug=", strprintf(_("Output debugging information (default: %u, supplying " " is optional)"), 0) + ". 
" + _("If is not supplied or if = 1, " "output all debugging information.") + _(" can be:") + " " + debugCategories + "."); if (showDebug) strUsage += HelpMessageOpt( "-nodebug", "Turn off debugging messages, same as -debug=0"); strUsage += HelpMessageOpt( "-help-debug", _("Show all debugging options (usage: --help -help-debug)")); strUsage += HelpMessageOpt( "-logips", strprintf(_("Include IP addresses in debug output (default: %u)"), DEFAULT_LOGIPS)); strUsage += HelpMessageOpt( "-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %u)"), DEFAULT_LOGTIMESTAMPS)); if (showDebug) { strUsage += HelpMessageOpt( "-logtimemicros", strprintf( "Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS)); strUsage += HelpMessageOpt( "-mocktime=", "Replace actual time with seconds since epoch (default: 0)"); strUsage += HelpMessageOpt( "-limitfreerelay=", strprintf("Continuously rate-limit free transactions to *1000 " "bytes per minute (default: %u)", DEFAULT_LIMITFREERELAY)); strUsage += HelpMessageOpt("-relaypriority", strprintf("Require high priority for relaying free " "or low-fee transactions (default: %u)", DEFAULT_RELAYPRIORITY)); strUsage += HelpMessageOpt( "-maxsigcachesize=", strprintf("Limit size of signature cache to MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxtipage=", strprintf("Maximum tip age in seconds to consider node in initial " "block download (default: %u)", DEFAULT_MAX_TIP_AGE)); } strUsage += HelpMessageOpt( "-minrelaytxfee=", strprintf( _("Fees (in %s/kB) smaller than this are considered zero fee for " "relaying, mining and transaction creation (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE))); strUsage += HelpMessageOpt( "-maxtxfee=", strprintf(_("Maximum total fees (in %s) to use in a single wallet " "transaction or raw transaction; setting this too low may " "abort large transactions (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE))); strUsage += HelpMessageOpt( "-printtoconsole", _("Send trace/debug info to console instead of debug.log file")); if (showDebug) { strUsage += HelpMessageOpt( "-printpriority", strprintf("Log transaction priority and fee per " "kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY)); } strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup " "(default: 1 when no -debug)")); AppendParamsHelpMessages(strUsage, showDebug); strUsage += HelpMessageGroup(_("Node relay options:")); if (showDebug) { strUsage += HelpMessageOpt( "-acceptnonstdtxn", strprintf( "Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard())); strUsage += HelpMessageOpt("-excessiveblocksize=", strprintf(_("Do not accept blocks larger than this " "limit, in bytes (default: %d)"), LEGACY_MAX_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-incrementalrelayfee=", strprintf( "Fee rate (in %s/kB) used to define cost of relay, used for " "mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE))); strUsage += HelpMessageOpt( "-dustrelayfee=", strprintf("Fee rate (in %s/kB) used to defined dust, the value of " "an output such that it will cost about 1/3 of its value " "in fees at this fee rate to spend it. 
(default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE))); } strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions " "for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP)); strUsage += HelpMessageOpt( "-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER)); strUsage += HelpMessageOpt( "-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we " "relay and mine (default: %u)"), MAX_OP_RETURN_RELAY)); strUsage += HelpMessageGroup(_("Block creation options:")); strUsage += HelpMessageOpt( "-blockmaxsize=", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_MAX_GENERATED_BLOCK_SIZE)); strUsage += HelpMessageOpt("-blockprioritysize=", strprintf(_("Set maximum size of high-priority/low-fee " "transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE)); strUsage += HelpMessageOpt( "-blockmintxfee=", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be " "included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE))); if (showDebug) strUsage += HelpMessageOpt("-blockversion=", "Override block version to test forking scenarios"); strUsage += HelpMessageGroup(_("RPC server options:")); strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands")); strUsage += HelpMessageOpt( "-rest", strprintf(_("Accept public REST requests (default: %u)"), DEFAULT_REST_ENABLE)); strUsage += HelpMessageOpt( "-rpcbind=", _("Bind to given address to listen for JSON-RPC connections. Use " "[host]:port notation for IPv6. This option can be specified " "multiple times (default: bind to all interfaces)")); strUsage += HelpMessageOpt("-rpccookiefile=", _("Location of the auth cookie (default: data dir)")); strUsage += HelpMessageOpt("-rpcuser=", _("Username for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcpassword=", _("Password for JSON-RPC connections")); strUsage += HelpMessageOpt( "-rpcauth=", _("Username and hashed password for JSON-RPC connections. The field " " comes in the format: :$. A canonical " "python script is included in share/rpcuser. The client then " "connects normally using the " "rpcuser=/rpcpassword= pair of arguments. This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcport=", strprintf(_("Listen for JSON-RPC connections on (default: %u or " "testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort())); strUsage += HelpMessageOpt( "-rpcallowip=", _("Allow JSON-RPC connections from specified source. Valid for " "are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. " "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). 
This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcthreads=", strprintf( _("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS)); if (showDebug) { strUsage += HelpMessageOpt( "-rpcworkqueue=", strprintf("Set the depth of the work queue to " "service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE)); strUsage += HelpMessageOpt( "-rpcservertimeout=", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT)); } return strUsage; } std::string LicenseInfo() { const std::string URL_SOURCE_CODE = ""; const std::string URL_WEBSITE = ""; return CopyrightHolders( strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" + "\n" + strprintf(_("Please contribute if you find %s useful. " "Visit %s for further information about the software."), PACKAGE_NAME, URL_WEBSITE) + "\n" + strprintf(_("The source code is available from %s."), URL_SOURCE_CODE) + "\n" + "\n" + _("This is experimental software.") + "\n" + strprintf(_("Distributed under the MIT software license, see the " "accompanying file %s or %s"), "COPYING", "") + "\n" + "\n" + strprintf(_("This product includes software developed by the " "OpenSSL Project for use in the OpenSSL Toolkit %s and " "cryptographic software written by Eric Young and UPnP " "software written by Thomas Bernard."), "") + "\n"; } static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex) { if (initialSync || !pBlockIndex) return; std::string strCmd = GetArg("-blocknotify", ""); boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex()); boost::thread t(runCommand, strCmd); // thread runs free } static bool fHaveGenesis = false; static boost::mutex cs_GenesisWait; static CConditionVariable condvar_GenesisWait; static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) { if (pBlockIndex != NULL) { { boost::unique_lock lock_GenesisWait(cs_GenesisWait); fHaveGenesis = true; } condvar_GenesisWait.notify_all(); } } struct CImportingNow { CImportingNow() { assert(fImporting == false); fImporting = true; } ~CImportingNow() { assert(fImporting == true); fImporting = false; } }; // If we're using -prune with -reindex, then delete block files that will be // ignored by the reindex. Since reindexing works by starting at block file 0 // and looping until a blockfile is missing, do the same here to delete any // later block files after a gap. Also delete all rev files since they'll be // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync // with what's actually on disk by the time we start downloading, so that // pruning works correctly. void CleanupBlockRevFiles() { std::map mapBlockFiles; // Glob all blk?????.dat and rev?????.dat files from the blocks directory. // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. 
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for " "-reindex with -prune\n"); boost::filesystem::path blocksdir = GetDataDir() / "blocks"; for (boost::filesystem::directory_iterator it(blocksdir); it != boost::filesystem::directory_iterator(); it++) { if (is_regular_file(*it) && it->path().filename().string().length() == 12 && it->path().filename().string().substr(8, 4) == ".dat") { if (it->path().filename().string().substr(0, 3) == "blk") mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path(); else if (it->path().filename().string().substr(0, 3) == "rev") remove(it->path()); } } // Remove all block files that aren't part of a contiguous set starting at // zero by walking the ordered map (keys are block file indices) by keeping // a separate counter. Once we hit a gap (or if 0 doesn't exist) start // removing block files. int nContigCounter = 0; for (const std::pair &item : mapBlockFiles) { if (atoi(item.first) == nContigCounter) { nContigCounter++; continue; } remove(item.second); } } void ThreadImport(const Config &config, std::vector vImportFiles) { RenameThread("bitcoin-loadblk"); { CImportingNow imp; // -reindex if (fReindex) { int nFile = 0; while (true) { CDiskBlockPos pos(nFile, 0); if (!boost::filesystem::exists(GetBlockPosFilename(pos, "blk"))) break; // No block files left to reindex FILE *file = OpenBlockFile(pos, true); if (!file) break; // This error is logged in OpenBlockFile LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); LoadExternalBlockFile(config, file, &pos); nFile++; } pblocktree->WriteReindexing(false); fReindex = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try // initializing (no-op if reindexing worked): InitBlockIndex(config); } // hardcoded $DATADIR/bootstrap.dat boost::filesystem::path pathBootstrap = GetDataDir() / "bootstrap.dat"; if (boost::filesystem::exists(pathBootstrap)) { FILE *file = fopen(pathBootstrap.string().c_str(), "rb"); if (file) { boost::filesystem::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old"; LogPrintf("Importing bootstrap.dat...\n"); LoadExternalBlockFile(config, file); RenameOver(pathBootstrap, pathBootstrapOld); } else { LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string()); } } // -loadblock= for (const boost::filesystem::path &path : vImportFiles) { FILE *file = fopen(path.string().c_str(), "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", path.string()); LoadExternalBlockFile(config, file); } else { LogPrintf("Warning: Could not open blocks file %s\n", path.string()); } } // scan for better chains in the block chain database, that are not yet // connected in the active best chain CValidationState state; if (!ActivateBestChain(config, state)) { LogPrintf("Failed to connect best block"); StartShutdown(); } if (GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); } } // End scope of CImportingNow LoadMempool(config); fDumpMempoolLater = !fRequestShutdown; } /** Sanity checks * Ensure that Bitcoin is running in a usable environment with all * necessary library support. */ bool InitSanityCheck(void) { if (!ECC_InitSanityCheck()) { InitError( "Elliptic curve cryptography sanity check failure. 
Aborting."); return false; } if (!glibc_sanity_test() || !glibcxx_sanity_test()) return false; return true; } static bool AppInitServers(Config &config, boost::thread_group &threadGroup) { RPCServer::OnStarted(&OnRPCStarted); RPCServer::OnStopped(&OnRPCStopped); RPCServer::OnPreCommand(&OnRPCPreCommand); if (!InitHTTPServer(config)) return false; if (!StartRPC()) return false; if (!StartHTTPRPC()) return false; if (GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) return false; if (!StartHTTPServer()) return false; return true; } // Parameter interaction based on rules void InitParameterInteraction() { // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified. if (IsArgSet("-bind")) { if (SoftSetBoolArg("-listen", true)) LogPrintf( "%s: parameter interaction: -bind set -> setting -listen=1\n", __func__); } if (IsArgSet("-whitebind")) { if (SoftSetBoolArg("-listen", true)) LogPrintf("%s: parameter interaction: -whitebind set -> setting " "-listen=1\n", __func__); } if (mapMultiArgs.count("-connect") && mapMultiArgs.at("-connect").size() > 0) { // when only connecting to trusted nodes, do not seed via DNS, or listen // by default. if (SoftSetBoolArg("-dnsseed", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-dnsseed=0\n", __func__); if (SoftSetBoolArg("-listen", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-listen=0\n", __func__); } if (IsArgSet("-proxy")) { // to protect privacy, do not listen by default if a default proxy // server is specified. if (SoftSetBoolArg("-listen", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__); // to protect privacy, do not use UPNP when a proxy is set. The user may // still specify -listen=1 to listen locally, so don't rely on this // happening through -listen below. if (SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__); // to protect privacy, do not discover addresses by default if (SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -proxy set -> setting " "-discover=0\n", __func__); } if (!GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening // (pointless) if (SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__); if (SoftSetBoolArg("-discover", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__); if (SoftSetBoolArg("-listenonion", false)) LogPrintf("%s: parameter interaction: -listen=0 -> setting " "-listenonion=0\n", __func__); } if (IsArgSet("-externalip")) { // if an explicit public IP is specified, do not try to find others if (SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -externalip set -> setting " "-discover=0\n", __func__); } // disable whitelistrelay in blocksonly mode if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { if (SoftSetBoolArg("-whitelistrelay", false)) LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting " "-whitelistrelay=0\n", __func__); } // Forcing relay from whitelisted hosts implies we will accept relays from // them in the first place. 
if (GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { if (SoftSetBoolArg("-whitelistrelay", true)) LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> " "setting -whitelistrelay=1\n", __func__); } } static std::string ResolveErrMsg(const char *const optname, const std::string &strBind) { return strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind); } void InitLogging() { fPrintToConsole = GetBoolArg("-printtoconsole", false); fLogTimestamps = GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS); fLogTimeMicros = GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS); fLogIPs = GetBoolArg("-logips", DEFAULT_LOGIPS); LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); LogPrintf("Bitcoin version %s\n", FormatFullVersion()); } namespace { // Variables internal to initialization process only ServiceFlags nRelevantServices = NODE_NETWORK; int nMaxConnections; int nUserMaxConnections; int nFD; ServiceFlags nLocalServices = NODE_NETWORK; } [[noreturn]] static void new_handler_terminate() { // Rather than throwing std::bad-alloc if allocation fails, terminate // immediately to (try to) avoid chain corruption. Since LogPrintf may // itself allocate memory, set the handler directly to terminate first. std::set_new_handler(std::terminate); LogPrintf("Error: Out of memory. Terminating.\n"); // The log was successful, terminate now. std::terminate(); }; bool AppInitBasicSetup() { // Step 1: setup #ifdef _MSC_VER // Turn off Microsoft heap dump noise _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, 0)); #endif #if _MSC_VER >= 1400 // Disable confusing "helpful" text message on abort, Ctrl-C _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #ifdef WIN32 // Enable Data Execution Prevention (DEP) // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008 // A failure is non-critical and needs no further attention! #ifndef PROCESS_DEP_ENABLE // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= // 0x0601 (Windows 7), which is not correct. Can be removed, when GCCs winbase.h // is fixed! 
#define PROCESS_DEP_ENABLE 0x00000001 #endif typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD); PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress( GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy"); if (setProcDEPPol != NULL) setProcDEPPol(PROCESS_DEP_ENABLE); #endif if (!SetupNetworking()) return InitError("Initializing networking failed"); #ifndef WIN32 if (!GetBoolArg("-sysperms", false)) { umask(077); } // Clean shutdown on SIGTERM struct sigaction sa; sa.sa_handler = HandleSIGTERM; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sigaction(SIGTERM, &sa, NULL); sigaction(SIGINT, &sa, NULL); // Reopen debug.log on SIGHUP struct sigaction sa_hup; sa_hup.sa_handler = HandleSIGHUP; sigemptyset(&sa_hup.sa_mask); sa_hup.sa_flags = 0; sigaction(SIGHUP, &sa_hup, NULL); // Ignore SIGPIPE, otherwise it will bring the daemon down if the client // closes unexpectedly signal(SIGPIPE, SIG_IGN); #endif std::set_new_handler(new_handler_terminate); return true; } bool AppInitParameterInteraction(Config &config) { const CChainParams &chainparams = Params(); // Step 2: parameter interactions // also see: InitParameterInteraction() // if using block pruning, then disallow txindex if (GetArg("-prune", 0)) { if (GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); } // Make sure enough file descriptors are available int nBind = std::max( (mapMultiArgs.count("-bind") ? mapMultiArgs.at("-bind").size() : 0) + (mapMultiArgs.count("-whitebind") ? mapMultiArgs.at("-whitebind").size() : 0), size_t(1)); nUserMaxConnections = GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); nMaxConnections = std::max(nUserMaxConnections, 0); // Trim requested connection counts, to fit into system limitations nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS)), 0); nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); if (nMaxConnections < nUserMaxConnections) InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, " "because of system limitations."), nUserMaxConnections, nMaxConnections)); // Step 3: parameter-to-internal-flags fDebug = mapMultiArgs.count("-debug"); // Special-case: if -debug=0/-nodebug is set, turn off debugging messages if (fDebug) { const std::vector &categories = mapMultiArgs.at("-debug"); if (GetBoolArg("-nodebug", false) || find(categories.begin(), categories.end(), std::string("0")) != categories.end()) fDebug = false; } // Check for -debugnet if (GetBoolArg("-debugnet", false)) InitWarning( _("Unsupported argument -debugnet ignored, use -debug=net.")); // Check for -socks - as this is a privacy risk to continue, exit here if (IsArgSet("-socks")) return InitError( _("Unsupported argument -socks found. 
Setting SOCKS version isn't " "possible anymore, only SOCKS5 proxies are supported.")); // Check for -tor - as this is a privacy risk to continue, exit here if (GetBoolArg("-tor", false)) return InitError(_("Unsupported argument -tor found, use -onion.")); if (GetBoolArg("-benchmark", false)) InitWarning( _("Unsupported argument -benchmark ignored, use -debug=bench.")); if (GetBoolArg("-whitelistalwaysrelay", false)) InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use " "-whitelistrelay and/or -whitelistforcerelay.")); if (IsArgSet("-blockminsize")) InitWarning("Unsupported argument -blockminsize ignored."); // Checkmempool and checkblockindex default to true in regtest mode int ratio = std::min( std::max(GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000); if (ratio != 0) { mempool.setSanityCheck(1.0 / ratio); } fCheckBlockIndex = GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks()); fCheckpointsEnabled = GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED); hashAssumeValid = uint256S( GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex())); if (!hashAssumeValid.IsNull()) LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex()); else LogPrintf("Validating signatures for all blocks.\n"); // mempool limits int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t nMempoolSizeMin = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40; if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0))); // Incremental relay fee sets the minimimum feerate increase necessary for // BIP 125 replacement in the mempool and the amount the mempool min fee // increases above the feerate of txs evicted due to mempool limiting. if (IsArgSet("-incrementalrelayfee")) { CAmount n = 0; if (!ParseMoney(GetArg("-incrementalrelayfee", ""), n)) return InitError(AmountErrMsg("incrementalrelayfee", GetArg("-incrementalrelayfee", ""))); incrementalRelayFee = CFeeRate(n); } // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency nScriptCheckThreads = GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); if (nScriptCheckThreads <= 0) nScriptCheckThreads += GetNumCores(); if (nScriptCheckThreads <= 1) nScriptCheckThreads = 0; else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; // Configure excessive block size. const uint64_t nProposedExcessiveBlockSize = GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE); if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) { return InitError( _("Excessive block size must be > 1,000,000 bytes (1MB)")); } // Check blockmaxsize does not exceed maximum accepted block size. // Also checks that it isn't smaller than 1MB, as to make sure we can // satisfy the "must be big" UAHF rule. 
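// Illustrative outcomes of the check below (values in bytes): with the // default excessive block size, -blockmaxsize=1000000 fails startup because // it is not strictly greater than the legacy 1MB limit, unless // -allowsmallgeneratedblocksize is also passed, in which case the same // message is emitted as a warning instead; -blockmaxsize=1500000 together // with -excessiveblocksize=1300000 always fails, since the override never // excuses exceeding the excessive block size.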
const uint64_t nProposedMaxGeneratedBlockSize = GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); - if (nProposedMaxGeneratedBlockSize <= LEGACY_MAX_BLOCK_SIZE || - nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) { - return InitError( + const bool maxGeneratedBlockSizeTooSmall = + nProposedMaxGeneratedBlockSize <= LEGACY_MAX_BLOCK_SIZE; + const bool maxGeneratedBlockSizeTooBig = + nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize(); + if (maxGeneratedBlockSizeTooSmall || maxGeneratedBlockSizeTooBig) { + auto msg = _("Max generated block size (blockmaxsize) cannot be lower than " - "1MB or exceed the excessive block size (excessiveblocksize)")); + "1MB or exceed the excessive block size (excessiveblocksize)"); + if (maxGeneratedBlockSizeTooBig || + !IsArgSet("-allowsmallgeneratedblocksize")) { + return InitError(msg); + } + + InitWarning(msg); } const int64_t nProposedUAHFStartTime = GetArg("-uahfstarttime", DEFAULT_UAHF_START_TIME); if (!config.SetUAHFStartTime(nProposedUAHFStartTime)) { return InitError( strprintf(_("Unable to set uahfstarttime to the value (%d)"), nProposedUAHFStartTime)); assert(nProposedUAHFStartTime == config.GetUAHFStartTime()); } // block pruning; get the amount of disk space (in MiB) to allot for block & // undo files int64_t nPruneArg = GetArg("-prune", 0); if (nPruneArg < 0) { return InitError( _("Prune cannot be configured with a negative value.")); } nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024; if (nPruneArg == 1) { // manual pruning: -prune=1 LogPrintf("Block pruning enabled. Use RPC call " "pruneblockchain(height) to manually prune block and undo " "files.\n"); nPruneTarget = std::numeric_limits::max(); fPruneMode = true; } else if (nPruneTarget) { if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) { return InitError( strprintf(_("Prune configured below the minimum of %d MiB. " "Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); } LogPrintf("Prune configured to target %uMiB on disk for block and undo " "files.\n", nPruneTarget / 1024 / 1024); fPruneMode = true; } RegisterAllRPCCommands(tableRPC); #ifdef ENABLE_WALLET RegisterWalletRPCCommands(tableRPC); RegisterDumpRPCCommands(tableRPC); #endif nConnectTimeout = GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); if (nConnectTimeout <= 0) nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; // Fee-per-kilobyte amount considered the same as "free". If you are mining, // be careful setting this: if you set it to zero then a transaction spammer // can cheaply fill blocks using 1-satoshi-fee transactions. It should be // set above the real cost to you of processing a transaction. if (IsArgSet("-minrelaytxfee")) { CAmount n = 0; if (!ParseMoney(GetArg("-minrelaytxfee", ""), n) || 0 == n) return InitError( AmountErrMsg("minrelaytxfee", GetArg("-minrelaytxfee", ""))); // High fee check is done afterward in CWallet::ParameterInteraction() ::minRelayTxFee = CFeeRate(n); } else if (incrementalRelayFee > ::minRelayTxFee) { // Allow only setting incrementalRelayFee to control both ::minRelayTxFee = incrementalRelayFee; LogPrintf( "Increasing minrelaytxfee to %s to match incrementalrelayfee\n", ::minRelayTxFee.ToString()); } // Sanity check argument for min fee for including tx in block // TODO: Harmonize which arguments need sanity checking and where that // happens. if (IsArgSet("-blockmintxfee")) { CAmount n = 0; if (!ParseMoney(GetArg("-blockmintxfee", ""), n)) return InitError( AmountErrMsg("blockmintxfee", GetArg("-blockmintxfee", ""))); } // Feerate used to define dust. 
Shouldn't be changed lightly as old // implementations may inadvertently create non-standard transactions. if (IsArgSet("-dustrelayfee")) { CAmount n = 0; if (!ParseMoney(GetArg("-dustrelayfee", ""), n) || 0 == n) return InitError( AmountErrMsg("dustrelayfee", GetArg("-dustrelayfee", ""))); dustRelayFee = CFeeRate(n); } fRequireStandard = !GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); if (chainparams.RequireStandard() && !fRequireStandard) return InitError( strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); nBytesPerSigOp = GetArg("-bytespersigop", nBytesPerSigOp); #ifdef ENABLE_WALLET if (!CWallet::ParameterInteraction()) return false; #endif fIsBareMultisigStd = GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); fAcceptDatacarrier = GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); nMaxDatacarrierBytes = GetArg("-datacarriersize", nMaxDatacarrierBytes); // Option to startup with mocktime set (used for regression testing): SetMockTime(GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op if (GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM); nMaxTipAge = GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); if (mapMultiArgs.count("-bip9params")) { // Allow overriding BIP9 parameters for testing if (!chainparams.MineBlocksOnDemand()) { return InitError( "BIP9 parameters may only be overridden on regtest."); } const std::vector &deployments = mapMultiArgs.at("-bip9params"); for (auto i : deployments) { std::vector vDeploymentParams; boost::split(vDeploymentParams, i, boost::is_any_of(":")); if (vDeploymentParams.size() != 3) { return InitError("BIP9 parameters malformed, expecting " "deployment:start:end"); } int64_t nStartTime, nTimeout; if (!ParseInt64(vDeploymentParams[1], &nStartTime)) { return InitError( strprintf("Invalid nStartTime (%s)", vDeploymentParams[1])); } if (!ParseInt64(vDeploymentParams[2], &nTimeout)) { return InitError( strprintf("Invalid nTimeout (%s)", vDeploymentParams[2])); } bool found = false; for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { if (vDeploymentParams[0].compare( VersionBitsDeploymentInfo[j].name) == 0) { UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout); found = true; LogPrintf("Setting BIP9 activation parameters for %s to " "start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout); break; } } if (!found) { return InitError( strprintf("Invalid deployment (%s)", vDeploymentParams[0])); } } } return true; } static bool LockDataDirectory(bool probeOnly) { std::string strDataDir = GetDataDir().string(); // Make sure only a single Bitcoin process is using the data directory. boost::filesystem::path pathLockFile = GetDataDir() / ".lock"; // empty lock file; created if it doesn't exist. FILE *file = fopen(pathLockFile.string().c_str(), "a"); if (file) fclose(file); try { static boost::interprocess::file_lock lock( pathLockFile.string().c_str()); if (!lock.try_lock()) { return InitError( strprintf(_("Cannot obtain a lock on data directory %s. %s is " "probably already running."), strDataDir, _(PACKAGE_NAME))); } if (probeOnly) { lock.unlock(); } } catch (const boost::interprocess::interprocess_exception &e) { return InitError(strprintf(_("Cannot obtain a lock on data directory " "%s. 
%s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what())); } return true; } bool AppInitSanityChecks() { // Step 4: sanity checks // Initialize elliptic curve code ECC_Start(); globalVerifyHandle.reset(new ECCVerifyHandle()); // Sanity check if (!InitSanityCheck()) return InitError(strprintf( _("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); // Probe the data directory lock to give an early error message, if possible return LockDataDirectory(true); } bool AppInitMain(Config &config, boost::thread_group &threadGroup, CScheduler &scheduler) { const CChainParams &chainparams = Params(); // Step 4a: application initialization // After daemonization get the data directory lock again and hold on to it // until exit. This creates a slight window for a race condition to happen, // however this condition is harmless: it will at most make us exit without // printing a message to console. if (!LockDataDirectory(false)) { // Detailed error printed inside LockDataDirectory return false; } #ifndef WIN32 CreatePidFile(GetPidFile(), getpid()); #endif if (GetBoolArg("-shrinkdebugfile", !fDebug)) { // Do this first since it both loads a bunch of debug.log into memory, // and because this needs to happen before any other debug.log printing. ShrinkDebugFile(); } if (fPrintToDebugLog) OpenDebugLog(); if (!fLogTimestamps) LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime())); LogPrintf("Default data directory %s\n", GetDefaultDataDir().string()); LogPrintf("Using data directory %s\n", GetDataDir().string()); LogPrintf("Using config file %s\n", GetConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); LogPrintf("Using at most %i automatic connections (%i file descriptors " "available)\n", nMaxConnections, nFD); InitSignatureCache(); LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads); if (nScriptCheckThreads) { for (int i = 0; i < nScriptCheckThreads - 1; i++) threadGroup.create_thread(&ThreadScriptCheck); } // Start the lightweight task scheduler thread CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler); threadGroup.create_thread(boost::bind(&TraceThread, "scheduler", serviceLoop)); /* Start the RPC server already. It will be started in "warmup" mode * and not really process calls already (but it will signify connections * that the server is there and will be ready later). Warmup mode will * be disabled when initialisation is finished. */ if (GetBoolArg("-server", false)) { uiInterface.InitMessage.connect(SetRPCWarmupStatus); if (!AppInitServers(config, threadGroup)) return InitError( _("Unable to start HTTP server. See debug log for details.")); } int64_t nStart; // Step 5: verify wallet database integrity #ifdef ENABLE_WALLET if (!CWallet::Verify()) return false; #endif // Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state // is not yet setup and may end up being set up twice if we // need to reindex later. 
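// Illustrative example for the -onlynet handling below: passing // -onlynet=onion marks IPv4 and IPv6 as limited via SetLimited(), so the // node only makes outbound connections through the Tor proxy; the option // restricts reachable networks rather than adding new ones.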
assert(!g_connman); g_connman = std::unique_ptr( new CConnman(GetRand(std::numeric_limits::max()), GetRand(std::numeric_limits::max()))); CConnman &connman = *g_connman; peerLogic.reset(new PeerLogicValidation(&connman)); RegisterValidationInterface(peerLogic.get()); RegisterNodeSignals(GetNodeSignals()); if (mapMultiArgs.count("-onlynet")) { std::set nets; for (const std::string &snet : mapMultiArgs.at("-onlynet")) { enum Network net = ParseNetwork(snet); if (net == NET_UNROUTABLE) return InitError(strprintf( _("Unknown network specified in -onlynet: '%s'"), snet)); nets.insert(net); } for (int n = 0; n < NET_MAX; n++) { enum Network net = (enum Network)n; if (!nets.count(net)) SetLimited(net); } } if (mapMultiArgs.count("-whitelist")) { for (const std::string &net : mapMultiArgs.at("-whitelist")) { CSubNet subnet; LookupSubNet(net.c_str(), subnet); if (!subnet.IsValid()) return InitError(strprintf( _("Invalid netmask specified in -whitelist: '%s'"), net)); connman.AddWhitelistedRange(subnet); } } bool proxyRandomize = GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE); // -proxy sets a proxy for all outgoing network traffic // -noproxy (or -proxy=0) as well as the empty string can be used to not set // a proxy, this is the default std::string proxyArg = GetArg("-proxy", ""); SetLimited(NET_TOR); if (proxyArg != "" && proxyArg != "0") { CService resolved(LookupNumeric(proxyArg.c_str(), 9050)); proxyType addrProxy = proxyType(resolved, proxyRandomize); if (!addrProxy.IsValid()) return InitError( strprintf(_("Invalid -proxy address: '%s'"), proxyArg)); SetProxy(NET_IPV4, addrProxy); SetProxy(NET_IPV6, addrProxy); SetProxy(NET_TOR, addrProxy); SetNameProxy(addrProxy); SetLimited(NET_TOR, false); // by default, -proxy sets onion as // reachable, unless -noonion later } // -onion can be used to set only a proxy for .onion, or override normal // proxy for .onion addresses. // -noonion (or -onion=0) disables connecting to .onion entirely. 
An empty // string is used to not override the onion proxy (in which case it defaults // to -proxy set above, or none) std::string onionArg = GetArg("-onion", ""); if (onionArg != "") { if (onionArg == "0") { // Handle -noonion/-onion=0 SetLimited(NET_TOR); // set onions as unreachable } else { CService resolved(LookupNumeric(onionArg.c_str(), 9050)); proxyType addrOnion = proxyType(resolved, proxyRandomize); if (!addrOnion.IsValid()) return InitError( strprintf(_("Invalid -onion address: '%s'"), onionArg)); SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } } // see Step 2: parameter interactions for more information about these fListen = GetBoolArg("-listen", DEFAULT_LISTEN); fDiscover = GetBoolArg("-discover", true); fNameLookup = GetBoolArg("-dns", DEFAULT_NAME_LOOKUP); fRelayTxes = !GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY); if (fListen) { bool fBound = false; if (mapMultiArgs.count("-bind")) { for (const std::string &strBind : mapMultiArgs.at("-bind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) return InitError(ResolveErrMsg("bind", strBind)); fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR)); } } if (mapMultiArgs.count("-whitebind")) { for (const std::string &strBind : mapMultiArgs.at("-whitebind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, 0, false)) return InitError(ResolveErrMsg("whitebind", strBind)); if (addrBind.GetPort() == 0) return InitError(strprintf( _("Need to specify a port with -whitebind: '%s'"), strBind)); fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST)); } } if (!mapMultiArgs.count("-bind") && !mapMultiArgs.count("-whitebind")) { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; fBound |= Bind(connman, CService(in6addr_any, GetListenPort()), BF_NONE); fBound |= Bind(connman, CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE); } if (!fBound) return InitError(_("Failed to listen on any port. 
Use -listen=0 if " "you want this.")); } if (mapMultiArgs.count("-externalip")) { for (const std::string &strAddr : mapMultiArgs.at("-externalip")) { CService addrLocal; if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid()) AddLocal(addrLocal, LOCAL_MANUAL); else return InitError(ResolveErrMsg("externalip", strAddr)); } } if (mapMultiArgs.count("-seednode")) { for (const std::string &strDest : mapMultiArgs.at("-seednode")) { connman.AddOneShot(strDest); } } #if ENABLE_ZMQ pzmqNotificationInterface = CZMQNotificationInterface::Create(); if (pzmqNotificationInterface) { RegisterValidationInterface(pzmqNotificationInterface); } #endif // unlimited unless -maxuploadtarget is set uint64_t nMaxOutboundLimit = 0; uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME; if (IsArgSet("-maxuploadtarget")) { nMaxOutboundLimit = GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * 1024; } // Step 7: load block chain fReindex = GetBoolArg("-reindex", false); bool fReindexChainState = GetBoolArg("-reindex-chainstate", false); // Upgrading to 0.8; hard-link the old blknnnn.dat files into /blocks/ boost::filesystem::path blocksDir = GetDataDir() / "blocks"; if (!boost::filesystem::exists(blocksDir)) { boost::filesystem::create_directories(blocksDir); bool linked = false; for (unsigned int i = 1; i < 10000; i++) { boost::filesystem::path source = GetDataDir() / strprintf("blk%04u.dat", i); if (!boost::filesystem::exists(source)) break; boost::filesystem::path dest = blocksDir / strprintf("blk%05u.dat", i - 1); try { boost::filesystem::create_hard_link(source, dest); LogPrintf("Hardlinked %s -> %s\n", source.string(), dest.string()); linked = true; } catch (const boost::filesystem::filesystem_error &e) { // Note: hardlink creation failing is not a disaster, it just // means blocks will get re-downloaded from peers. LogPrintf("Error hardlinking blk%04u.dat: %s\n", i, e.what()); break; } } if (linked) { fReindex = true; } } // cache size calculations int64_t nTotalCache = (GetArg("-dbcache", nDefaultDbCache) << 20); // total cache cannot be less than nMinDbCache nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be greater than nMaxDbcache nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); int64_t nBlockTreeDBCache = nTotalCache / 8; nBlockTreeDBCache = std::min(nBlockTreeDBCache, (GetBoolArg("-txindex", DEFAULT_TXINDEX) ? 
nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20); nTotalCache -= nBlockTreeDBCache; // use 25%-50% of the remainder for disk cache int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // cap total coins db cache nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); nTotalCache -= nCoinDBCache; // the rest goes to in-memory cache nCoinCacheUsage = nTotalCache; int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; LogPrintf("Cache configuration:\n"); LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of " "unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024)); bool fLoaded = false; while (!fLoaded) { bool fReset = fReindex; std::string strLoadError; uiInterface.InitMessage(_("Loading block index...")); nStart = GetTimeMillis(); do { try { UnloadBlockIndex(); delete pcoinsTip; delete pcoinsdbview; delete pcoinscatcher; delete pblocktree; pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex); pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex || fReindexChainState); pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview); pcoinsTip = new CCoinsViewCache(pcoinscatcher); if (fReindex) { pblocktree->WriteReindexing(true); // If we're reindexing in prune mode, wipe away unusable // block files and all undo data files if (fPruneMode) CleanupBlockRevFiles(); } if (!LoadBlockIndex(chainparams)) { strLoadError = _("Error loading block database"); break; } // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way // around). if (!mapBlockIndex.empty() && mapBlockIndex.count( chainparams.GetConsensus().hashGenesisBlock) == 0) return InitError(_("Incorrect or no genesis block found. " "Wrong datadir for network?")); // Initialize the block index (no-op if non-empty database was // already loaded) if (!InitBlockIndex(config)) { strLoadError = _("Error initializing block database"); break; } // Check for changed -txindex state if (fTxIndex != GetBoolArg("-txindex", DEFAULT_TXINDEX)) { strLoadError = _("You need to rebuild the database using " "-reindex-chainstate to change -txindex"); break; } // Check for changed -prune state. What we are concerned about // is a user who has pruned blocks in the past, but is now // trying to run unpruned. if (fHavePruned && !fPruneMode) { strLoadError = _("You need to rebuild the database using -reindex to " "go back to unpruned mode. This will redownload the " "entire blockchain"); break; } if (!fReindex && chainActive.Tip() != NULL) { uiInterface.InitMessage(_("Rewinding blocks...")); if (!RewindBlockIndex(config, chainparams)) { strLoadError = _("Unable to rewind the database to a " "pre-fork state. You will need to " "redownload the blockchain"); break; } } uiInterface.InitMessage(_("Verifying blocks...")); if (fHavePruned && GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) { LogPrintf("Prune: pruned datadir may not have more than %d " "blocks; only checking available blocks", MIN_BLOCKS_TO_KEEP); } { LOCK(cs_main); CBlockIndex *tip = chainActive.Tip(); RPCNotifyBlockChange(true, tip); if (tip && tip->nTime > GetAdjustedTime() + 2 * 60 * 60) { strLoadError = _("The block database contains a block which " "appears to be from the future. 
" "This may be due to your computer's date and " "time being set incorrectly. " "Only rebuild the block database if you are sure " "that your computer's date and time are correct"); break; } } if (!CVerifyDB().VerifyDB( config, chainparams, pcoinsdbview, GetArg("-checklevel", DEFAULT_CHECKLEVEL), GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { strLoadError = _("Corrupted block database detected"); break; } } catch (const std::exception &e) { if (fDebug) LogPrintf("%s\n", e.what()); strLoadError = _("Error opening block database"); break; } fLoaded = true; } while (false); if (!fLoaded) { // first suggest a reindex if (!fReset) { bool fRet = uiInterface.ThreadSafeQuestion( strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"), strLoadError + ".\nPlease restart with -reindex or " "-reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { fReindex = true; fRequestShutdown = false; } else { LogPrintf("Aborted block database rebuild. Exiting.\n"); return false; } } else { return InitError(strLoadError); } } } // As LoadBlockIndex can take several minutes, it's possible the user // requested to kill the GUI during the last operation. If so, exit. // As the program has not fully started yet, Shutdown() is possibly // overkill. if (fRequestShutdown) { LogPrintf("Shutdown requested. Exiting.\n"); return false; } LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart); boost::filesystem::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_filein(fopen(est_path.string().c_str(), "rb"), SER_DISK, CLIENT_VERSION); // Allowed to fail as this file IS missing on first startup. if (!est_filein.IsNull()) mempool.ReadFeeEstimates(est_filein); fFeeEstimatesInitialized = true; // Step 8: load wallet #ifdef ENABLE_WALLET if (!CWallet::InitLoadWallet()) return false; #else LogPrintf("No wallet support compiled in!\n"); #endif // Step 9: data directory maintenance // if pruning, unset the service bit and perform the initial blockstore // prune after any wallet rescanning has taken place. if (fPruneMode) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { uiInterface.InitMessage(_("Pruning blockstore...")); PruneAndFlush(); } } // Step 10: import blocks if (!CheckDiskSpace()) return false; // Either install a handler to notify us when genesis activates, or set // fHaveGenesis directly. // No locking, as this happens before any background thread is started. 
if (chainActive.Tip() == NULL) { uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait); } else { fHaveGenesis = true; } if (IsArgSet("-blocknotify")) uiInterface.NotifyBlockTip.connect(BlockNotifyCallback); std::vector vImportFiles; if (mapMultiArgs.count("-loadblock")) { for (const std::string &strFile : mapMultiArgs.at("-loadblock")) { vImportFiles.push_back(strFile); } } threadGroup.create_thread( boost::bind(&ThreadImport, boost::ref(config), vImportFiles)); // Wait for genesis block to be processed { boost::unique_lock lock(cs_GenesisWait); while (!fHaveGenesis) { condvar_GenesisWait.wait(lock); } uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait); } // Step 11: start node //// debug print LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size()); LogPrintf("nBestHeight = %d\n", chainActive.Height()); if (GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) StartTorControl(threadGroup, scheduler); Discover(threadGroup); // Map ports with UPnP MapPort(GetBoolArg("-upnp", DEFAULT_UPNP)); std::string strNodeError; CConnman::Options connOptions; connOptions.nLocalServices = nLocalServices; connOptions.nRelevantServices = nRelevantServices; connOptions.nMaxConnections = nMaxConnections; connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections); connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS; connOptions.nMaxFeeler = 1; connOptions.nBestHeight = chainActive.Height(); connOptions.uiInterface = &uiInterface; connOptions.nSendBufferMaxSize = 1000 * GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER); connOptions.nReceiveFloodSize = 1000 * GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER); connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe; connOptions.nMaxOutboundLimit = nMaxOutboundLimit; if (!connman.Start(scheduler, strNodeError, connOptions)) return InitError(strNodeError); // Step 12: finished SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); #ifdef ENABLE_WALLET if (pwalletMain) pwalletMain->postInitProcess(threadGroup); #endif return !fRequestShutdown; }
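For reference, the blockmaxsize validation introduced by this change reduces to a small pure predicate, which makes the error/warning split easy to unit-test in isolation. Below is a minimal standalone sketch of that logic, assuming only the three inputs shown matter; CheckGeneratedBlockSize, SizeCheck and LEGACY_MAX are illustrative names, not identifiers from this patch.

// Minimal sketch of the generated-block-size rule added in
// AppInitParameterInteraction() above (names are illustrative).
#include <cassert>
#include <cstdint>

enum class SizeCheck { Ok, Warn, Fatal };

static const uint64_t LEGACY_MAX = 1000000; // legacy 1MB block size limit

SizeCheck CheckGeneratedBlockSize(uint64_t blockMaxSize,
                                  uint64_t excessiveBlockSize,
                                  bool allowSmall) {
    const bool tooSmall = blockMaxSize <= LEGACY_MAX;
    const bool tooBig = blockMaxSize > excessiveBlockSize;
    if (!tooSmall && !tooBig) return SizeCheck::Ok;
    // -allowsmallgeneratedblocksize only downgrades the "too small" case;
    // exceeding the excessive block size is always fatal.
    if (tooBig || !allowSmall) return SizeCheck::Fatal;
    return SizeCheck::Warn;
}

int main() {
    assert(CheckGeneratedBlockSize(2000000, 8000000, false) == SizeCheck::Ok);
    assert(CheckGeneratedBlockSize(1000000, 8000000, false) == SizeCheck::Fatal);
    assert(CheckGeneratedBlockSize(1000000, 8000000, true) == SizeCheck::Warn);
    assert(CheckGeneratedBlockSize(1500000, 1300000, true) == SizeCheck::Fatal);
    return 0;
}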