Changeset View
Changeset View
Standalone View
Standalone View
qa/rpc-tests/sendheaders.py
#!/usr/bin/env python3 | #!/usr/bin/env python3 | ||||
# Copyright (c) 2014-2016 The Bitcoin Core developers | # Copyright (c) 2014-2016 The Bitcoin Core developers | ||||
# Distributed under the MIT software license, see the accompanying | # Distributed under the MIT software license, see the accompanying | ||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php. | # file COPYING or http://www.opensource.org/licenses/mit-license.php. | ||||
from test_framework.mininode import * | from test_framework.mininode import * | ||||
from test_framework.test_framework import BitcoinTestFramework | from test_framework.test_framework import BitcoinTestFramework | ||||
from test_framework.util import * | from test_framework.util import * | ||||
from test_framework.blocktools import create_block, create_coinbase | from test_framework.blocktools import create_block, create_coinbase | ||||
''' | ''' | ||||
SendHeadersTest -- test behavior of headers messages to announce blocks. | SendHeadersTest -- test behavior of headers messages to announce blocks. | ||||
Setup: | Setup: | ||||
- Two nodes, two p2p connections to node0. One p2p connection should only ever | - Two nodes, two p2p connections to node0. One p2p connection should only ever | ||||
receive inv's (omitted from testing description below, this is our control). | receive inv's (omitted from testing description below, this is our control). | ||||
Second node is used for creating reorgs. | Second node is used for creating reorgs. | ||||
Part 1: No headers announcements before "sendheaders" | Part 1: No headers announcements before "sendheaders" | ||||
a. node mines a block [expect: inv] | a. node mines a block [expect: inv] | ||||
send getdata for the block [expect: block] | send getdata for the block [expect: block] | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | |||||
d. Announce 49 headers that don't connect. | d. Announce 49 headers that don't connect. | ||||
Expect: getheaders message each time. | Expect: getheaders message each time. | ||||
e. Announce one more that doesn't connect. | e. Announce one more that doesn't connect. | ||||
Expect: disconnect. | Expect: disconnect. | ||||
''' | ''' | ||||
direct_fetch_response_time = 0.05 | direct_fetch_response_time = 0.05 | ||||
class BaseNode(SingleNodeConnCB): | class BaseNode(SingleNodeConnCB): | ||||
def __init__(self): | def __init__(self): | ||||
SingleNodeConnCB.__init__(self) | SingleNodeConnCB.__init__(self) | ||||
self.last_inv = None | self.last_inv = None | ||||
self.last_headers = None | self.last_headers = None | ||||
self.last_block = None | self.last_block = None | ||||
self.last_getdata = None | self.last_getdata = None | ||||
self.block_announced = False | self.block_announced = False | ||||
self.last_getheaders = None | self.last_getheaders = None | ||||
▲ Show 20 Lines • Show All 65 Lines • ▼ Show 20 Lines | def check_last_announcement(self, headers=None, inv=None): | ||||
if self.last_inv != None: | if self.last_inv != None: | ||||
compare_inv = [x.hash for x in self.last_inv.inv] | compare_inv = [x.hash for x in self.last_inv.inv] | ||||
if compare_inv != expect_inv: | if compare_inv != expect_inv: | ||||
success = False | success = False | ||||
hash_headers = [] | hash_headers = [] | ||||
if self.last_headers != None: | if self.last_headers != None: | ||||
# treat headers as a list of block hashes | # treat headers as a list of block hashes | ||||
hash_headers = [ x.sha256 for x in self.last_headers.headers ] | hash_headers = [x.sha256 for x in self.last_headers.headers] | ||||
if hash_headers != expect_headers: | if hash_headers != expect_headers: | ||||
success = False | success = False | ||||
self.last_inv = None | self.last_inv = None | ||||
self.last_headers = None | self.last_headers = None | ||||
return success | return success | ||||
# Syncing helpers | # Syncing helpers | ||||
def wait_for_block(self, blockhash, timeout=60): | def wait_for_block(self, blockhash, timeout=60): | ||||
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash | test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash | ||||
assert(wait_until(test_function, timeout=timeout)) | assert(wait_until(test_function, timeout=timeout)) | ||||
return | return | ||||
def wait_for_getheaders(self, timeout=60): | def wait_for_getheaders(self, timeout=60): | ||||
test_function = lambda: self.last_getheaders != None | test_function = lambda: self.last_getheaders != None | ||||
assert(wait_until(test_function, timeout=timeout)) | assert(wait_until(test_function, timeout=timeout)) | ||||
return | return | ||||
def wait_for_getdata(self, hash_list, timeout=60): | def wait_for_getdata(self, hash_list, timeout=60): | ||||
if hash_list == []: | if hash_list == []: | ||||
return | return | ||||
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list | test_function = lambda: self.last_getdata != None and [ | ||||
x.hash for x in self.last_getdata.inv] == hash_list | |||||
assert(wait_until(test_function, timeout=timeout)) | assert(wait_until(test_function, timeout=timeout)) | ||||
return | return | ||||
def wait_for_disconnect(self, timeout=60): | def wait_for_disconnect(self, timeout=60): | ||||
test_function = lambda: self.disconnected | test_function = lambda: self.disconnected | ||||
assert(wait_until(test_function, timeout=timeout)) | assert(wait_until(test_function, timeout=timeout)) | ||||
return | return | ||||
def wait_for_block_announcement(self, block_hash, timeout=60): | def wait_for_block_announcement(self, block_hash, timeout=60): | ||||
test_function = lambda: self.last_blockhash_announced == block_hash | test_function = lambda: self.last_blockhash_announced == block_hash | ||||
assert(wait_until(test_function, timeout=timeout)) | assert(wait_until(test_function, timeout=timeout)) | ||||
return | return | ||||
def send_header_for_blocks(self, new_blocks): | def send_header_for_blocks(self, new_blocks): | ||||
headers_message = msg_headers() | headers_message = msg_headers() | ||||
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ] | headers_message.headers = [CBlockHeader(b) for b in new_blocks] | ||||
self.send_message(headers_message) | self.send_message(headers_message) | ||||
def send_getblocks(self, locator): | def send_getblocks(self, locator): | ||||
getblocks_message = msg_getblocks() | getblocks_message = msg_getblocks() | ||||
getblocks_message.locator.vHave = locator | getblocks_message.locator.vHave = locator | ||||
self.send_message(getblocks_message) | self.send_message(getblocks_message) | ||||
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a | # InvNode: This peer should only ever receive inv's, because it doesn't ever send a | ||||
# "sendheaders" message. | # "sendheaders" message. | ||||
class InvNode(BaseNode): | class InvNode(BaseNode): | ||||
def __init__(self): | def __init__(self): | ||||
BaseNode.__init__(self) | BaseNode.__init__(self) | ||||
# TestNode: This peer is the one we use for most of the testing. | # TestNode: This peer is the one we use for most of the testing. | ||||
class TestNode(BaseNode): | class TestNode(BaseNode): | ||||
def __init__(self): | def __init__(self): | ||||
BaseNode.__init__(self) | BaseNode.__init__(self) | ||||
class SendHeadersTest(BitcoinTestFramework): | class SendHeadersTest(BitcoinTestFramework): | ||||
def __init__(self): | def __init__(self): | ||||
super().__init__() | super().__init__() | ||||
self.setup_clean_chain = True | self.setup_clean_chain = True | ||||
self.num_nodes = 2 | self.num_nodes = 2 | ||||
def setup_network(self): | def setup_network(self): | ||||
self.nodes = [] | self.nodes = [] | ||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2) | self.nodes = start_nodes( | ||||
self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]] * 2) | |||||
connect_nodes(self.nodes[0], 1) | connect_nodes(self.nodes[0], 1) | ||||
# mine count blocks and return the new tip | # mine count blocks and return the new tip | ||||
def mine_blocks(self, count): | def mine_blocks(self, count): | ||||
# Clear out last block announcement from each p2p listener | # Clear out last block announcement from each p2p listener | ||||
[ x.clear_last_announcement() for x in self.p2p_connections ] | [x.clear_last_announcement() for x in self.p2p_connections] | ||||
self.nodes[0].generate(count) | self.nodes[0].generate(count) | ||||
return int(self.nodes[0].getbestblockhash(), 16) | return int(self.nodes[0].getbestblockhash(), 16) | ||||
# mine a reorg that invalidates length blocks (replacing them with | # mine a reorg that invalidates length blocks (replacing them with | ||||
# length+1 blocks). | # length+1 blocks). | ||||
# Note: we clear the state of our p2p connections after the | # Note: we clear the state of our p2p connections after the | ||||
# to-be-reorged-out blocks are mined, so that we don't break later tests. | # to-be-reorged-out blocks are mined, so that we don't break later tests. | ||||
# return the list of block hashes newly mined | # return the list of block hashes newly mined | ||||
def mine_reorg(self, length): | def mine_reorg(self, length): | ||||
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's | self.nodes[0].generate( | ||||
length) # make sure all invalidated blocks are node0's | |||||
sync_blocks(self.nodes, wait=0.1) | sync_blocks(self.nodes, wait=0.1) | ||||
for x in self.p2p_connections: | for x in self.p2p_connections: | ||||
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) | x.wait_for_block_announcement( | ||||
int(self.nodes[0].getbestblockhash(), 16)) | |||||
x.clear_last_announcement() | x.clear_last_announcement() | ||||
tip_height = self.nodes[1].getblockcount() | tip_height = self.nodes[1].getblockcount() | ||||
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1)) | hash_to_invalidate = self.nodes[ | ||||
1].getblockhash(tip_height - (length - 1)) | |||||
self.nodes[1].invalidateblock(hash_to_invalidate) | self.nodes[1].invalidateblock(hash_to_invalidate) | ||||
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain | all_hashes = self.nodes[1].generate( | ||||
length + 1) # Must be longer than the orig chain | |||||
sync_blocks(self.nodes, wait=0.1) | sync_blocks(self.nodes, wait=0.1) | ||||
return [int(x, 16) for x in all_hashes] | return [int(x, 16) for x in all_hashes] | ||||
def run_test(self): | def run_test(self): | ||||
# Setup the p2p connections and start up the network thread. | # Setup the p2p connections and start up the network thread. | ||||
inv_node = InvNode() | inv_node = InvNode() | ||||
test_node = TestNode() | test_node = TestNode() | ||||
self.p2p_connections = [inv_node, test_node] | self.p2p_connections = [inv_node, test_node] | ||||
connections = [] | connections = [] | ||||
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) | connections.append( | ||||
NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) | |||||
# Set nServices to 0 for test_node, so no block download will occur outside of | # Set nServices to 0 for test_node, so no block download will occur outside of | ||||
# direct fetching | # direct fetching | ||||
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) | connections.append( | ||||
NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) | |||||
inv_node.add_connection(connections[0]) | inv_node.add_connection(connections[0]) | ||||
test_node.add_connection(connections[1]) | test_node.add_connection(connections[1]) | ||||
NetworkThread().start() # Start up network handling in another thread | NetworkThread().start() # Start up network handling in another thread | ||||
# Test logic begins here | # Test logic begins here | ||||
inv_node.wait_for_verack() | inv_node.wait_for_verack() | ||||
test_node.wait_for_verack() | test_node.wait_for_verack() | ||||
tip = int(self.nodes[0].getbestblockhash(), 16) | tip = int(self.nodes[0].getbestblockhash(), 16) | ||||
# PART 1 | # PART 1 | ||||
# 1. Mine a block; expect inv announcements each time | # 1. Mine a block; expect inv announcements each time | ||||
print("Part 1: headers don't start before sendheaders message...") | print("Part 1: headers don't start before sendheaders message...") | ||||
for i in range(4): | for i in range(4): | ||||
old_tip = tip | old_tip = tip | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) | assert_equal(test_node.check_last_announcement(inv=[tip]), True) | ||||
# Try a few different responses; none should affect next announcement | # Try a few different responses; none should affect next | ||||
# announcement | |||||
if i == 0: | if i == 0: | ||||
# first request the block | # first request the block | ||||
test_node.get_data([tip]) | test_node.get_data([tip]) | ||||
test_node.wait_for_block(tip, timeout=5) | test_node.wait_for_block(tip, timeout=5) | ||||
elif i == 1: | elif i == 1: | ||||
# next try requesting header and block | # next try requesting header and block | ||||
test_node.get_headers(locator=[old_tip], hashstop=tip) | test_node.get_headers(locator=[old_tip], hashstop=tip) | ||||
test_node.get_data([tip]) | test_node.get_data([tip]) | ||||
test_node.wait_for_block(tip) | test_node.wait_for_block(tip) | ||||
test_node.clear_last_announcement() # since we requested headers... | test_node.clear_last_announcement( | ||||
) # since we requested headers... | |||||
elif i == 2: | elif i == 2: | ||||
# this time announce own block via headers | # this time announce own block via headers | ||||
height = self.nodes[0].getblockcount() | height = self.nodes[0].getblockcount() | ||||
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] | last_time = self.nodes[0].getblock( | ||||
self.nodes[0].getbestblockhash())['time'] | |||||
block_time = last_time + 1 | block_time = last_time + 1 | ||||
new_block = create_block(tip, create_coinbase(height+1), block_time) | new_block = create_block( | ||||
tip, create_coinbase(height + 1), block_time) | |||||
new_block.solve() | new_block.solve() | ||||
test_node.send_header_for_blocks([new_block]) | test_node.send_header_for_blocks([new_block]) | ||||
test_node.wait_for_getdata([new_block.sha256], timeout=5) | test_node.wait_for_getdata([new_block.sha256], timeout=5) | ||||
test_node.send_message(msg_block(new_block)) | test_node.send_message(msg_block(new_block)) | ||||
test_node.sync_with_ping() # make sure this block is processed | test_node.sync_with_ping() # make sure this block is processed | ||||
inv_node.clear_last_announcement() | inv_node.clear_last_announcement() | ||||
test_node.clear_last_announcement() | test_node.clear_last_announcement() | ||||
print("Part 1: success!") | print("Part 1: success!") | ||||
print("Part 2: announce blocks with headers after sendheaders message...") | print( | ||||
"Part 2: announce blocks with headers after sendheaders message...") | |||||
# PART 2 | # PART 2 | ||||
# 2. Send a sendheaders message and test that headers announcements | # 2. Send a sendheaders message and test that headers announcements | ||||
# commence and keep working. | # commence and keep working. | ||||
test_node.send_message(msg_sendheaders()) | test_node.send_message(msg_sendheaders()) | ||||
prev_tip = int(self.nodes[0].getbestblockhash(), 16) | prev_tip = int(self.nodes[0].getbestblockhash(), 16) | ||||
test_node.get_headers(locator=[prev_tip], hashstop=0) | test_node.get_headers(locator=[prev_tip], hashstop=0) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
# Now that we've synced headers, headers announcements should work | # Now that we've synced headers, headers announcements should work | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) | assert_equal(test_node.check_last_announcement(headers=[tip]), True) | ||||
height = self.nodes[0].getblockcount()+1 | height = self.nodes[0].getblockcount() + 1 | ||||
block_time += 10 # Advance far enough ahead | block_time += 10 # Advance far enough ahead | ||||
for i in range(10): | for i in range(10): | ||||
# Mine i blocks, and alternate announcing either via | # Mine i blocks, and alternate announcing either via | ||||
# inv (of tip) or via headers. After each, new blocks | # inv (of tip) or via headers. After each, new blocks | ||||
# mined by the node should successfully be announced | # mined by the node should successfully be announced | ||||
# with block header, even though the blocks are never requested | # with block header, even though the blocks are never requested | ||||
for j in range(2): | for j in range(2): | ||||
blocks = [] | blocks = [] | ||||
for b in range(i+1): | for b in range(i + 1): | ||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) | blocks.append( | ||||
create_block(tip, create_coinbase(height), block_time)) | |||||
blocks[-1].solve() | blocks[-1].solve() | ||||
tip = blocks[-1].sha256 | tip = blocks[-1].sha256 | ||||
block_time += 1 | block_time += 1 | ||||
height += 1 | height += 1 | ||||
if j == 0: | if j == 0: | ||||
# Announce via inv | # Announce via inv | ||||
test_node.send_block_inv(tip) | test_node.send_block_inv(tip) | ||||
test_node.wait_for_getheaders(timeout=5) | test_node.wait_for_getheaders(timeout=5) | ||||
# Should have received a getheaders now | # Should have received a getheaders now | ||||
test_node.send_header_for_blocks(blocks) | test_node.send_header_for_blocks(blocks) | ||||
# Test that duplicate inv's won't result in duplicate | # Test that duplicate inv's won't result in duplicate | ||||
# getdata requests, or duplicate headers announcements | # getdata requests, or duplicate headers announcements | ||||
[ inv_node.send_block_inv(x.sha256) for x in blocks ] | [inv_node.send_block_inv(x.sha256) for x in blocks] | ||||
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5) | test_node.wait_for_getdata( | ||||
[x.sha256 for x in blocks], timeout=5) | |||||
inv_node.sync_with_ping() | inv_node.sync_with_ping() | ||||
else: | else: | ||||
# Announce via headers | # Announce via headers | ||||
test_node.send_header_for_blocks(blocks) | test_node.send_header_for_blocks(blocks) | ||||
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5) | test_node.wait_for_getdata( | ||||
[x.sha256 for x in blocks], timeout=5) | |||||
# Test that duplicate headers won't result in duplicate | # Test that duplicate headers won't result in duplicate | ||||
# getdata requests (the check is further down) | # getdata requests (the check is further down) | ||||
inv_node.send_header_for_blocks(blocks) | inv_node.send_header_for_blocks(blocks) | ||||
inv_node.sync_with_ping() | inv_node.sync_with_ping() | ||||
[ test_node.send_message(msg_block(x)) for x in blocks ] | [test_node.send_message(msg_block(x)) for x in blocks] | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
inv_node.sync_with_ping() | inv_node.sync_with_ping() | ||||
# This block should not be announced to the inv node (since it also | # This block should not be announced to the inv node (since it also | ||||
# broadcast it) | # broadcast it) | ||||
assert_equal(inv_node.last_inv, None) | assert_equal(inv_node.last_inv, None) | ||||
assert_equal(inv_node.last_headers, None) | assert_equal(inv_node.last_headers, None) | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) | assert_equal( | ||||
test_node.check_last_announcement(headers=[tip]), True) | |||||
height += 1 | height += 1 | ||||
block_time += 1 | block_time += 1 | ||||
print("Part 2: success!") | print("Part 2: success!") | ||||
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") | print( | ||||
"Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") | |||||
# PART 3. Headers announcements can stop after large reorg, and resume after | # PART 3. Headers announcements can stop after large reorg, and resume after | ||||
# getheaders or inv from peer. | # getheaders or inv from peer. | ||||
for j in range(2): | for j in range(2): | ||||
# First try mining a reorg that can propagate with header announcement | # First try mining a reorg that can propagate with header | ||||
# announcement | |||||
new_block_hashes = self.mine_reorg(length=7) | new_block_hashes = self.mine_reorg(length=7) | ||||
tip = new_block_hashes[-1] | tip = new_block_hashes[-1] | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True) | assert_equal( | ||||
test_node.check_last_announcement(headers=new_block_hashes), True) | |||||
block_time += 8 | block_time += 8 | ||||
# Mine a too-large reorg, which should be announced with a single inv | # Mine a too-large reorg, which should be announced with a single | ||||
# inv | |||||
new_block_hashes = self.mine_reorg(length=8) | new_block_hashes = self.mine_reorg(length=8) | ||||
tip = new_block_hashes[-1] | tip = new_block_hashes[-1] | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) | assert_equal(test_node.check_last_announcement(inv=[tip]), True) | ||||
block_time += 9 | block_time += 9 | ||||
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"] | fork_point = self.nodes[0].getblock( | ||||
"%02x" % new_block_hashes[0])["previousblockhash"] | |||||
fork_point = int(fork_point, 16) | fork_point = int(fork_point, 16) | ||||
# Use getblocks/getdata | # Use getblocks/getdata | ||||
test_node.send_getblocks(locator = [fork_point]) | test_node.send_getblocks(locator=[fork_point]) | ||||
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True) | assert_equal( | ||||
test_node.check_last_announcement(inv=new_block_hashes), True) | |||||
test_node.get_data(new_block_hashes) | test_node.get_data(new_block_hashes) | ||||
test_node.wait_for_block(new_block_hashes[-1]) | test_node.wait_for_block(new_block_hashes[-1]) | ||||
for i in range(3): | for i in range(3): | ||||
# Mine another block, still should get only an inv | # Mine another block, still should get only an inv | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(inv=[tip]), True) | assert_equal( | ||||
test_node.check_last_announcement(inv=[tip]), True) | |||||
if i == 0: | if i == 0: | ||||
# Just get the data -- shouldn't cause headers announcements to resume | # Just get the data -- shouldn't cause headers | ||||
# announcements to resume | |||||
test_node.get_data([tip]) | test_node.get_data([tip]) | ||||
test_node.wait_for_block(tip) | test_node.wait_for_block(tip) | ||||
elif i == 1: | elif i == 1: | ||||
# Send a getheaders message that shouldn't trigger headers announcements | # Send a getheaders message that shouldn't trigger headers announcements | ||||
# to resume (best header sent will be too old) | # to resume (best header sent will be too old) | ||||
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1]) | test_node.get_headers(locator=[ | ||||
fork_point], hashstop=new_block_hashes[1]) | |||||
test_node.get_data([tip]) | test_node.get_data([tip]) | ||||
test_node.wait_for_block(tip) | test_node.wait_for_block(tip) | ||||
elif i == 2: | elif i == 2: | ||||
test_node.get_data([tip]) | test_node.get_data([tip]) | ||||
test_node.wait_for_block(tip) | test_node.wait_for_block(tip) | ||||
# This time, try sending either a getheaders to trigger resumption | # This time, try sending either a getheaders to trigger resumption | ||||
# of headers announcements, or mine a new block and inv it, also | # of headers announcements, or mine a new block and inv it, also | ||||
# triggering resumption of headers announcements. | # triggering resumption of headers announcements. | ||||
if j == 0: | if j == 0: | ||||
test_node.get_headers(locator=[tip], hashstop=0) | test_node.get_headers(locator=[tip], hashstop=0) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
else: | else: | ||||
test_node.send_block_inv(tip) | test_node.send_block_inv(tip) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
# New blocks should now be announced with header | # New blocks should now be announced with header | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | assert_equal(inv_node.check_last_announcement(inv=[tip]), True) | ||||
assert_equal(test_node.check_last_announcement(headers=[tip]), True) | assert_equal( | ||||
test_node.check_last_announcement(headers=[tip]), True) | |||||
print("Part 3: success!") | print("Part 3: success!") | ||||
print("Part 4: Testing direct fetch behavior...") | print("Part 4: Testing direct fetch behavior...") | ||||
tip = self.mine_blocks(1) | tip = self.mine_blocks(1) | ||||
height = self.nodes[0].getblockcount() + 1 | height = self.nodes[0].getblockcount() + 1 | ||||
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] | last_time = self.nodes[0].getblock( | ||||
self.nodes[0].getbestblockhash())['time'] | |||||
block_time = last_time + 1 | block_time = last_time + 1 | ||||
# Create 2 blocks. Send the blocks, then send the headers. | # Create 2 blocks. Send the blocks, then send the headers. | ||||
blocks = [] | blocks = [] | ||||
for b in range(2): | for b in range(2): | ||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) | blocks.append( | ||||
create_block(tip, create_coinbase(height), block_time)) | |||||
blocks[-1].solve() | blocks[-1].solve() | ||||
tip = blocks[-1].sha256 | tip = blocks[-1].sha256 | ||||
block_time += 1 | block_time += 1 | ||||
height += 1 | height += 1 | ||||
inv_node.send_message(msg_block(blocks[-1])) | inv_node.send_message(msg_block(blocks[-1])) | ||||
inv_node.sync_with_ping() # Make sure blocks are processed | inv_node.sync_with_ping() # Make sure blocks are processed | ||||
test_node.last_getdata = None | test_node.last_getdata = None | ||||
test_node.send_header_for_blocks(blocks) | test_node.send_header_for_blocks(blocks) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
# should not have received any getdata messages | # should not have received any getdata messages | ||||
with mininode_lock: | with mininode_lock: | ||||
assert_equal(test_node.last_getdata, None) | assert_equal(test_node.last_getdata, None) | ||||
# This time, direct fetch should work | # This time, direct fetch should work | ||||
blocks = [] | blocks = [] | ||||
for b in range(3): | for b in range(3): | ||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) | blocks.append( | ||||
create_block(tip, create_coinbase(height), block_time)) | |||||
blocks[-1].solve() | blocks[-1].solve() | ||||
tip = blocks[-1].sha256 | tip = blocks[-1].sha256 | ||||
block_time += 1 | block_time += 1 | ||||
height += 1 | height += 1 | ||||
test_node.send_header_for_blocks(blocks) | test_node.send_header_for_blocks(blocks) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time) | test_node.wait_for_getdata( | ||||
[x.sha256 for x in blocks], timeout=direct_fetch_response_time) | |||||
[ test_node.send_message(msg_block(x)) for x in blocks ] | [test_node.send_message(msg_block(x)) for x in blocks] | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
# Now announce a header that forks the last two blocks | # Now announce a header that forks the last two blocks | ||||
tip = blocks[0].sha256 | tip = blocks[0].sha256 | ||||
height -= 1 | height -= 1 | ||||
blocks = [] | blocks = [] | ||||
# Create extra blocks for later | # Create extra blocks for later | ||||
for b in range(20): | for b in range(20): | ||||
blocks.append(create_block(tip, create_coinbase(height), block_time)) | blocks.append( | ||||
create_block(tip, create_coinbase(height), block_time)) | |||||
blocks[-1].solve() | blocks[-1].solve() | ||||
tip = blocks[-1].sha256 | tip = blocks[-1].sha256 | ||||
block_time += 1 | block_time += 1 | ||||
height += 1 | height += 1 | ||||
# Announcing one block on fork should not trigger direct fetch | # Announcing one block on fork should not trigger direct fetch | ||||
# (less work than tip) | # (less work than tip) | ||||
test_node.last_getdata = None | test_node.last_getdata = None | ||||
test_node.send_header_for_blocks(blocks[0:1]) | test_node.send_header_for_blocks(blocks[0:1]) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
with mininode_lock: | with mininode_lock: | ||||
assert_equal(test_node.last_getdata, None) | assert_equal(test_node.last_getdata, None) | ||||
# Announcing one more block on fork should trigger direct fetch for | # Announcing one more block on fork should trigger direct fetch for | ||||
# both blocks (same work as tip) | # both blocks (same work as tip) | ||||
test_node.send_header_for_blocks(blocks[1:2]) | test_node.send_header_for_blocks(blocks[1:2]) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) | test_node.wait_for_getdata( | ||||
[x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) | |||||
# Announcing 16 more headers should trigger direct fetch for 14 more | # Announcing 16 more headers should trigger direct fetch for 14 more | ||||
# blocks | # blocks | ||||
test_node.send_header_for_blocks(blocks[2:18]) | test_node.send_header_for_blocks(blocks[2:18]) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) | test_node.wait_for_getdata( | ||||
[x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) | |||||
# Announcing 1 more header should not trigger any response | # Announcing 1 more header should not trigger any response | ||||
test_node.last_getdata = None | test_node.last_getdata = None | ||||
test_node.send_header_for_blocks(blocks[18:19]) | test_node.send_header_for_blocks(blocks[18:19]) | ||||
test_node.sync_with_ping() | test_node.sync_with_ping() | ||||
with mininode_lock: | with mininode_lock: | ||||
assert_equal(test_node.last_getdata, None) | assert_equal(test_node.last_getdata, None) | ||||
print("Part 4: success!") | print("Part 4: success!") | ||||
# Now deliver all those blocks we announced. | # Now deliver all those blocks we announced. | ||||
[ test_node.send_message(msg_block(x)) for x in blocks ] | [test_node.send_message(msg_block(x)) for x in blocks] | ||||
print("Part 5: Testing handling of unconnecting headers")
# Receiving a header that doesn't connect must not prevent subsequent
# chain sync.
for _ in range(10):
    test_node.last_getdata = None
    blocks = []
    # Mine two fresh blocks on top of the current tip.
    for _ in range(2):
        new_block = create_block(tip, create_coinbase(height), block_time)
        new_block.solve()
        blocks.append(new_block)
        tip = new_block.sha256
        block_time += 1
        height += 1
    # Announce only the second header -- it can't connect, so the node
    # should reply with a getheaders to fill the gap.
    with mininode_lock:
        test_node.last_getheaders = None
    test_node.send_header_for_blocks([blocks[1]])
    test_node.wait_for_getheaders(timeout=1)
    # Announce both headers in order, then deliver the blocks.
    test_node.send_header_for_blocks(blocks)
    test_node.wait_for_getdata([blk.sha256 for blk in blocks])
    for blk in blocks:
        test_node.send_message(msg_block(blk))
    test_node.sync_with_ping()
    # The node's best block should now be the second new block.
    assert_equal(
        int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# If a peer repeatedly sends headers that don't connect, the node must
# not loop forever issuing getheaders; it tolerates a bounded number of
# unconnecting announcements and eventually disconnects the peer.
MAX_UNCONNECTING_HEADERS = 10
# Mine MAX_UNCONNECTING_HEADERS + 1 blocks. None are delivered below,
# so every header after the first is an unconnecting announcement.
for _ in range(MAX_UNCONNECTING_HEADERS + 1):
    block = create_block(tip, create_coinbase(height), block_time)
    block.solve()
    blocks.append(block)
    tip = block.sha256
    block_time += 1
    height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
    # Send a header that doesn't connect, check that we get a
    # getheaders in response.
    with mininode_lock:
        test_node.last_getheaders = None
    test_node.send_header_for_blocks([blocks[i]])
    test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
    # Send a header that doesn't connect, check that we get a
    # getheaders.
    with mininode_lock:
        test_node.last_getheaders = None
    test_node.send_header_for_blocks([blocks[i % len(blocks)]])
    test_node.wait_for_getheaders(timeout=1)
# One more unconnecting header exceeds the tolerance: no getheaders
# should come back this time, and the node should disconnect us.
with mininode_lock:
    # Fix: clear the tracker on test_node (the p2p connection), not on
    # self (the test object). The original `self.last_getheaders = None`
    # set a meaningless attribute, leaving test_node.last_getheaders
    # stale for the check that follows.
    test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
Show All 12 Lines |