diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -92,14 +92,14 @@
         # Create a new block
         b0 = self.next_block(0)
         self.save_spendable_output()
-        self.sync_blocks([b0])
+        self.send_blocks([b0])

         # Allow the block to mature
         blocks = []
         for i in range(99):
             blocks.append(self.next_block(5000 + i))
             self.save_spendable_output()
-        self.sync_blocks(blocks)
+        self.send_blocks(blocks)

         # collect spendable outputs now to avoid cluttering the code later on
         out = []
@@ -115,7 +115,7 @@
         b2 = self.next_block(2, spend=out[1])
         self.save_spendable_output()

-        self.sync_blocks([b1, b2])
+        self.send_blocks([b1, b2])

         # Fork like this:
         #
@@ -128,7 +128,7 @@
         self.move_tip(1)
         b3 = self.next_block(3, spend=out[1])
         txout_b3 = b3.vtx[1]
-        self.sync_blocks([b3], False)
+        self.send_blocks([b3], False)

         # Now we add another block to make the alternative chain longer.
         #
@@ -136,7 +136,7 @@
         #                     \-> b3 (1) -> b4 (2)
         self.log.info("Reorg to a longer chain")
         b4 = self.next_block(4, spend=out[2])
-        self.sync_blocks([b4])
+        self.send_blocks([b4])

         # ... and back to the first chain.
         # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
@@ -144,11 +144,11 @@
         self.move_tip(2)
         b5 = self.next_block(5, spend=out[2])
         self.save_spendable_output()
-        self.sync_blocks([b5], False)
+        self.send_blocks([b5], False)

         self.log.info("Reorg back to the original chain")
         b6 = self.next_block(6, spend=out[3])
-        self.sync_blocks([b6], True)
+        self.send_blocks([b6], True)

         # Try to create a fork that double-spends
         # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
@@ -158,10 +158,10 @@
             "Reject a chain with a double spend, even if it is longer")
         self.move_tip(5)
         b7 = self.next_block(7, spend=out[2])
-        self.sync_blocks([b7], False)
+        self.send_blocks([b7], False)

         b8 = self.next_block(8, spend=out[4])
-        self.sync_blocks([b8], False, reconnect=True)
+        self.send_blocks([b8], False, reconnect=True)

         # Try to create a block that has too much fee
         # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
@@ -171,7 +171,7 @@
             "Reject a block where the miner creates too much coinbase reward")
         self.move_tip(6)
         b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
-        self.sync_blocks([b9], success=False,
+        self.send_blocks([b9], success=False,
                          reject_reason='bad-cb-amount', reconnect=True)

         # Create a fork that ends in a block with too much fee (the one that causes the reorg)
@@ -182,10 +182,10 @@
             "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
         self.move_tip(5)
         b10 = self.next_block(10, spend=out[3])
-        self.sync_blocks([b10], False)
+        self.send_blocks([b10], False)

         b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
-        self.sync_blocks([b11], success=False,
+        self.send_blocks([b11], success=False,
                          reject_reason='bad-cb-amount', reconnect=True)

         # Try again, but with a valid fork first
@@ -200,7 +200,7 @@
         b13 = self.next_block(13, spend=out[4])
         self.save_spendable_output()
         b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
-        self.sync_blocks([b12, b13, b14], success=False,
+        self.send_blocks([b12, b13, b14], success=False,
                          reject_reason='bad-cb-amount', reconnect=True)

         # New tip should be b13.
@@ -211,7 +211,7 @@
         self.move_tip(13)
         b15 = self.next_block(15)
         self.save_spendable_output()
-        self.sync_blocks([b15], True)
+        self.send_blocks([b15], True)

         # Attempt to spend a transaction created on a different fork
         # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
@@ -220,7 +220,7 @@
         self.log.info("Reject a block with a spend from a re-org'ed out tx")
         self.move_tip(15)
         b17 = self.next_block(17, spend=txout_b3)
-        self.sync_blocks([b17], success=False,
+        self.send_blocks([b17], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # Attempt to spend a transaction created on a different fork (on a fork this time)
@@ -232,10 +232,10 @@
             "Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
         self.move_tip(13)
         b18 = self.next_block(18, spend=txout_b3)
-        self.sync_blocks([b18], False)
+        self.send_blocks([b18], False)

         b19 = self.next_block(19, spend=out[6])
-        self.sync_blocks([b19], success=False,
+        self.send_blocks([b19], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # Attempt to spend a coinbase at depth too low
@@ -245,7 +245,7 @@
         self.log.info("Reject a block spending an immature coinbase.")
         self.move_tip(15)
         b20 = self.next_block(20, spend=out[7])
-        self.sync_blocks([b20], success=False,
+        self.send_blocks([b20], success=False,
                          reject_reason='bad-txns-premature-spend-of-coinbase')

         # Attempt to spend a coinbase at depth too low (on a fork this time)
@@ -257,10 +257,10 @@
             "Reject a block spending an immature coinbase (on a forked chain)")
         self.move_tip(13)
         b21 = self.next_block(21, spend=out[6])
-        self.sync_blocks([b21], False)
+        self.send_blocks([b21], False)

         b22 = self.next_block(22, spend=out[5])
-        self.sync_blocks([b22], success=False,
+        self.send_blocks([b22], success=False,
                          reject_reason='bad-txns-premature-spend-of-coinbase')

         # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected
@@ -279,7 +279,7 @@
         b23 = self.update_block(23, [tx])
         # Make sure the math above worked out to produce a max-sized block
         assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE)
-        self.sync_blocks([b23], True)
+        self.send_blocks([b23], True)
         self.save_spendable_output()

         # Create blocks with a coinbase input script size out of range
@@ -296,12 +296,12 @@
         # update_block causes the merkle root to get updated, even with no new
         # transactions, and updates the required state.
         b26 = self.update_block(26, [])
-        self.sync_blocks([b26], success=False,
+        self.send_blocks([b26], success=False,
                          reject_reason='bad-cb-length', reconnect=True)

         # Extend the b26 chain to make sure bitcoind isn't accepting b26
         b27 = self.next_block(27, spend=out[7])
-        self.sync_blocks([b27], False)
+        self.send_blocks([b27], False)

         # Now try a too-large-coinbase script
         self.move_tip(15)
@@ -309,12 +309,12 @@
         b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
         b28.vtx[0].rehash()
         b28 = self.update_block(28, [])
-        self.sync_blocks([b28], success=False,
+        self.send_blocks([b28], success=False,
                          reject_reason='bad-cb-length', reconnect=True)

         # Extend the b28 chain to make sure bitcoind isn't accepting b28
         b29 = self.next_block(29, spend=out[7])
-        self.sync_blocks([b29], False)
+        self.send_blocks([b29], False)

         # b30 has a max-sized coinbase scriptSig.
         self.move_tip(23)
@@ -322,7 +322,7 @@
         b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
         b30.vtx[0].rehash()
         b30 = self.update_block(30, [])
-        self.sync_blocks([b30], True)
+        self.send_blocks([b30], True)
         self.save_spendable_output()

         self.log.info("Skipped sigops tests")
@@ -333,7 +333,7 @@
         self.save_spendable_output()
         b35 = self.next_block(35)
         self.save_spendable_output()
-        self.sync_blocks([b31, b33, b35], True)
+        self.send_blocks([b31, b33, b35], True)

         # Check spending of a transaction in a block which failed to connect
         #
@@ -352,14 +352,14 @@
         txout_b37 = b37.vtx[1]
         tx = self.create_and_sign_transaction(out[11], 0)
         b37 = self.update_block(37, [tx])
-        self.sync_blocks([b37], success=False,
+        self.send_blocks([b37], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # attempt to spend b37's first non-coinbase tx, at which point b37 was
         # still considered valid
         self.move_tip(35)
         b38 = self.next_block(38, spend=txout_b37)
-        self.sync_blocks([b38], success=False,
+        self.send_blocks([b38], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         self.log.info("Skipped sigops tests")
@@ -368,7 +368,7 @@
         b39 = self.next_block(39)
         self.save_spendable_output()
         b41 = self.next_block(41)
-        self.sync_blocks([b39, b41], True)
+        self.send_blocks([b39, b41], True)

         # Fork off of b39 to create a constant base again
         #
@@ -381,7 +381,7 @@
         b43 = self.next_block(43, spend=out[13])
         self.save_spendable_output()

-        self.sync_blocks([b42, b43], True)
+        self.send_blocks([b42, b43], True)

         # Test a number of really invalid scenarios
         #
@@ -404,7 +404,7 @@
         self.tip = b44
         self.block_heights[b44.sha256] = height
         self.blocks[44] = b44
-        self.sync_blocks([b44], True)
+        self.send_blocks([b44], True)

         self.log.info("Reject a block with a non-coinbase as the first tx")
         non_coinbase = self.create_tx(out[15], 0, 1)
@@ -420,7 +420,7 @@
             self.tip.sha256] + 1
         self.tip = b45
         self.blocks[45] = b45
-        self.sync_blocks([b45], success=False,
+        self.send_blocks([b45], success=False,
                          reject_reason='bad-cb-missing', reconnect=True)

         self.log.info("Reject a block with no transactions")
@@ -436,7 +436,7 @@
         self.tip = b46
         assert 46 not in self.blocks
         self.blocks[46] = b46
-        self.sync_blocks([b46], success=False,
+        self.send_blocks([b46], success=False,
                          reject_reason='bad-cb-missing', reconnect=True)

         self.log.info("Reject a block with invalid work")
@@ -446,21 +446,21 @@
         while b47.sha256 < target:
             b47.nNonce += 1
             b47.rehash()
-        self.sync_blocks([b47], False, request_block=False)
+        self.send_blocks([b47], False, request_block=False)

         self.log.info("Reject a block with a timestamp >2 hours in the future")
         self.move_tip(44)
         b48 = self.next_block(48, solve=False)
         b48.nTime = int(time.time()) + 60 * 60 * 3
         b48.solve()
-        self.sync_blocks([b48], False, request_block=False)
+        self.send_blocks([b48], False, request_block=False)

         self.log.info("Reject a block with invalid merkle hash")
         self.move_tip(44)
         b49 = self.next_block(49)
         b49.hashMerkleRoot += 1
         b49.solve()
-        self.sync_blocks([b49], success=False,
+        self.send_blocks([b49], success=False,
                          reject_reason='bad-txnmrklroot', reconnect=True)

         self.log.info("Reject a block with incorrect POW limit")
@@ -468,21 +468,21 @@
         b50 = self.next_block(50)
         b50.nBits = b50.nBits - 1
         b50.solve()
-        self.sync_blocks([b50], False, request_block=False, reconnect=True)
+        self.send_blocks([b50], False, request_block=False, reconnect=True)

         self.log.info("Reject a block with two coinbase transactions")
         self.move_tip(44)
         b51 = self.next_block(51)
         cb2 = create_coinbase(51, self.coinbase_pubkey)
         b51 = self.update_block(51, [cb2])
-        self.sync_blocks([b51], success=False,
+        self.send_blocks([b51], success=False,
                          reject_reason='bad-tx-coinbase', reconnect=True)

         self.log.info("Reject a block with duplicate transactions")
         self.move_tip(44)
         b52 = self.next_block(52, spend=out[15])
         b52 = self.update_block(52, [b52.vtx[1]])
-        self.sync_blocks([b52], success=False,
+        self.send_blocks([b52], success=False,
                          reject_reason='tx-duplicate', reconnect=True)

         # Test block timestamps
@@ -491,21 +491,21 @@
         #
         self.move_tip(43)
         b53 = self.next_block(53, spend=out[14])
-        self.sync_blocks([b53], False)
+        self.send_blocks([b53], False)
         self.save_spendable_output()

         self.log.info("Reject a block with timestamp before MedianTimePast")
         b54 = self.next_block(54, spend=out[15])
         b54.nTime = b35.nTime - 1
         b54.solve()
-        self.sync_blocks([b54], False, request_block=False)
+        self.send_blocks([b54], False, request_block=False)

         # valid timestamp
         self.move_tip(53)
         b55 = self.next_block(55, spend=out[15])
         b55.nTime = b35.nTime
         self.update_block(55, [])
-        self.sync_blocks([b55], True)
+        self.send_blocks([b55], True)
         self.save_spendable_output()

         # Test Merkle tree malleability
@@ -551,7 +551,7 @@
         assert_equal(len(b56.vtx), 3)
         b56 = self.update_block(56, [b57.vtx[2]])
         assert_equal(b56.hash, b57.hash)
-        self.sync_blocks([b56], success=False,
+        self.send_blocks([b56], success=False,
                          reject_reason='bad-txns-duplicate', reconnect=True)

         # b57p2 - a good block with 6 tx'es, don't submit until end
@@ -573,15 +573,15 @@
         assert_equal(len(b56p2.vtx), 6)
         b56p2 = self.update_block("b56p2", b56p2.vtx[4:6], reorder=False)
         assert_equal(b56p2.hash, b57p2.hash)
-        self.sync_blocks([b56p2], success=False,
+        self.send_blocks([b56p2], success=False,
                          reject_reason='bad-txns-duplicate', reconnect=True)

         self.move_tip("57p2")
-        self.sync_blocks([b57p2], True)
+        self.send_blocks([b57p2], True)

         self.move_tip(57)
         # The tip is not updated because 57p2 seen first
-        self.sync_blocks([b57], False)
+        self.send_blocks([b57], False)
         self.save_spendable_output()

         # Test a few invalid tx types
@@ -603,7 +603,7 @@
         pad_tx(tx)
         tx.calc_sha256()
         b58 = self.update_block(58, [tx])
-        self.sync_blocks([b58], success=False,
+        self.send_blocks([b58], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # tx with output value > input value
@@ -613,13 +613,13 @@
         b59 = self.next_block(59)
         tx = self.create_and_sign_transaction(out[17], 51 * COIN)
         b59 = self.update_block(59, [tx])
-        self.sync_blocks([b59], success=False,
+        self.send_blocks([b59], success=False,
                          reject_reason='bad-txns-in-belowout', reconnect=True)

         # reset to good chain
         self.move_tip(57)
         b60 = self.next_block(60, spend=out[17])
-        self.sync_blocks([b60], True)
+        self.send_blocks([b60], True)
         self.save_spendable_output()

         # Test BIP30
@@ -640,7 +640,7 @@
         b61.vtx[0].rehash()
         b61 = self.update_block(61, [])
         assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
-        self.sync_blocks([b61], success=False,
+        self.send_blocks([b61], success=False,
                          reject_reason='bad-txns-BIP30', reconnect=True)

         # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
@@ -660,7 +660,7 @@
         assert tx.vin[0].nSequence < 0xffffffff
         tx.calc_sha256()
         b62 = self.update_block(62, [tx])
-        self.sync_blocks([b62], success=False,
+        self.send_blocks([b62], success=False,
                          reject_reason='bad-txns-nonfinal')

         # Test a non-final coinbase is also rejected
@@ -676,7 +676,7 @@
         b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
         b63.vtx[0].rehash()
         b63 = self.update_block(63, [])
-        self.sync_blocks([b63], success=False,
+        self.send_blocks([b63], success=False,
                          reject_reason='bad-txns-nonfinal')

         # This checks that a block with a bloated VARINT between the block_header and the array of tx such that
@@ -713,7 +713,7 @@
         tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
         b64a = self.update_block("64a", [tx])
         assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8)
-        self.sync_blocks([b64a], success=False,
+        self.send_blocks([b64a], success=False,
                          reject_reason='non-canonical ReadCompactSize():')

         # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
@@ -730,7 +730,7 @@
         assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE)
         self.blocks[64] = b64
         b64 = self.update_block(64, [])
-        self.sync_blocks([b64], True)
+        self.send_blocks([b64], True)
         self.save_spendable_output()

         # Spend an output created in the block itself
@@ -744,7 +744,7 @@
         tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
         tx2 = self.create_and_sign_transaction(tx1, 0)
         b65 = self.update_block(65, [tx1, tx2])
-        self.sync_blocks([b65], True)
+        self.send_blocks([b65], True)
         self.save_spendable_output()

         # Attempt to double-spend a transaction created in a block
@@ -761,7 +761,7 @@
         tx2 = self.create_and_sign_transaction(tx1, 1)
         tx3 = self.create_and_sign_transaction(tx1, 2)
         b67 = self.update_block(67, [tx1, tx2, tx3])
-        self.sync_blocks([b67], success=False,
+        self.send_blocks([b67], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # More tests of block subsidy
@@ -783,7 +783,7 @@
         tx = self.create_and_sign_transaction(
             out[20], out[20].vout[0].nValue - 9)
         b68 = self.update_block(68, [tx])
-        self.sync_blocks([b68], success=False,
+        self.send_blocks([b68], success=False,
                          reject_reason='bad-cb-amount', reconnect=True)

         self.log.info(
@@ -793,7 +793,7 @@
         tx = self.create_and_sign_transaction(
             out[20], out[20].vout[0].nValue - 10)
         self.update_block(69, [tx])
-        self.sync_blocks([b69], True)
+        self.send_blocks([b69], True)
         self.save_spendable_output()

         # Test spending the outpoint of a non-existent transaction
@@ -813,7 +813,7 @@
         tx.vout.append(CTxOut(1, b""))
         pad_tx(tx)
         b70 = self.update_block(70, [tx])
-        self.sync_blocks([b70], success=False,
+        self.send_blocks([b70], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
@@ -843,11 +843,11 @@
         assert_equal(b72.sha256, b71.sha256)

         self.move_tip(71)
-        self.sync_blocks([b71], success=False,
+        self.send_blocks([b71], success=False,
                          reject_reason='bad-txns-duplicate', reconnect=True)

         self.move_tip(72)
-        self.sync_blocks([b72], True)
+        self.send_blocks([b72], True)
         self.save_spendable_output()

         self.log.info("Skipped sigops tests")
@@ -856,7 +856,7 @@
         self.save_spendable_output()
         b76 = self.next_block(76)
         self.save_spendable_output()
-        self.sync_blocks([b75, b76], True)
+        self.send_blocks([b75, b76], True)

         # Test transaction resurrection
         #
@@ -880,35 +880,35 @@
         b77 = self.next_block(77)
         tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
         b77 = self.update_block(77, [tx77])
-        self.sync_blocks([b77], True)
+        self.send_blocks([b77], True)
         self.save_spendable_output()

         b78 = self.next_block(78)
         tx78 = self.create_tx(tx77, 0, 9 * COIN)
         b78 = self.update_block(78, [tx78])
-        self.sync_blocks([b78], True)
+        self.send_blocks([b78], True)

         b79 = self.next_block(79)
         tx79 = self.create_tx(tx78, 0, 8 * COIN)
         b79 = self.update_block(79, [tx79])
-        self.sync_blocks([b79], True)
+        self.send_blocks([b79], True)

         # mempool should be empty
         assert_equal(len(self.nodes[0].getrawmempool()), 0)

         self.move_tip(77)
         b80 = self.next_block(80, spend=out[25])
-        self.sync_blocks([b80], False, request_block=False)
+        self.send_blocks([b80], False, request_block=False)
         self.save_spendable_output()

         b81 = self.next_block(81, spend=out[26])
         # other chain is same length
-        self.sync_blocks([b81], False, request_block=False)
+        self.send_blocks([b81], False, request_block=False)
         self.save_spendable_output()

         b82 = self.next_block(82, spend=out[27])
         # now this chain is longer, triggers re-org
-        self.sync_blocks([b82], True)
+        self.send_blocks([b82], True)
         self.save_spendable_output()

         # now check that tx78 and tx79 have been put back into the peer's
@@ -935,7 +935,7 @@
         tx2.rehash()

         b83 = self.update_block(83, [tx1, tx2])
-        self.sync_blocks([b83], True)
+        self.send_blocks([b83], True)
         self.save_spendable_output()

         # Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
@@ -963,30 +963,30 @@
         tx5 = self.create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN]))

         b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
-        self.sync_blocks([b84], True)
+        self.send_blocks([b84], True)
         self.save_spendable_output()

         self.move_tip(83)
         b85 = self.next_block(85, spend=out[29])
-        self.sync_blocks([b85], False)  # other chain is same length
+        self.send_blocks([b85], False)  # other chain is same length

         b86 = self.next_block(86, spend=out[30])
-        self.sync_blocks([b86], True)
+        self.send_blocks([b86], True)

         self.move_tip(84)
         b87 = self.next_block(87, spend=out[30])
-        self.sync_blocks([b87], False)  # other chain is same length
+        self.send_blocks([b87], False)  # other chain is same length
         self.save_spendable_output()

         b88 = self.next_block(88, spend=out[31])
-        self.sync_blocks([b88], True)
+        self.send_blocks([b88], True)
         self.save_spendable_output()

         # trying to spend the OP_RETURN output is rejected
         b89a = self.next_block("89a", spend=out[32])
         tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
         b89a = self.update_block("89a", [tx])
-        self.sync_blocks([b89a], success=False,
+        self.send_blocks([b89a], success=False,
                          reject_reason='bad-txns-inputs-missingorspent', reconnect=True)

         self.log.info(
@@ -1009,7 +1009,7 @@
             self.save_spendable_output()
             spend = self.get_spendable_output()

-        self.sync_blocks(blocks, True, timeout=960)
+        self.send_blocks(blocks, True, timeout=960)
         chain1_tip = i

         # now create alt chain of same length
@@ -1017,18 +1017,18 @@
         blocks2 = []
         for i in range(89, LARGE_REORG_SIZE + 89):
             blocks2.append(self.next_block("alt" + str(i)))
-        self.sync_blocks(blocks2, False, request_block=False)
+        self.send_blocks(blocks2, False, request_block=False)

         # extend alt chain to trigger re-org
         block = self.next_block("alt" + str(chain1_tip + 1))
-        self.sync_blocks([block], True, timeout=960)
+        self.send_blocks([block], True, timeout=960)

         # ... and re-org back to the first chain
         self.move_tip(chain1_tip)
         block = self.next_block(chain1_tip + 1)
-        self.sync_blocks([block], False, request_block=False)
+        self.send_blocks([block], False, request_block=False)
         block = self.next_block(chain1_tip + 2)
-        self.sync_blocks([block], True, timeout=960)
+        self.send_blocks([block], True, timeout=960)

     # Helper methods
     ################
@@ -1148,7 +1148,7 @@
         self.nodes[0].disconnect_p2ps()
         self.bootstrap_p2p()

-    def sync_blocks(self, blocks, success=True, reject_reason=None,
+    def send_blocks(self, blocks, success=True, reject_reason=None,
                     request_block=True, reconnect=False, timeout=60):
         """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -222,7 +222,7 @@
         block.solve()
         return block

-    def sync_blocks(self, blocks, success=True):
+    def send_blocks(self, blocks, success=True):
         """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

        Call with success = False if the tip shouldn't advance to the most recent block."""
@@ -256,7 +256,7 @@
         # Generate 489 more version 4 blocks
         test_blocks = self.generate_blocks(489)
         # Test #1
-        self.sync_blocks(test_blocks)
+        self.send_blocks(test_blocks)

         # Still not activated.
         assert_equal(get_csv_status(self.nodes[0]), False)
@@ -313,7 +313,7 @@
         # 2 more version 4 blocks
         test_blocks = self.generate_blocks(2)
         # Test #2
-        self.sync_blocks(test_blocks)
+        self.send_blocks(test_blocks)

         self.log.info(
             "Not yet activated, height = 574 (will activate for block 576, not 575)")
@@ -397,7 +397,7 @@
         success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress)
                             for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)])
         # Test #3
-        self.sync_blocks([self.create_test_block(success_txs)])
+        self.send_blocks([self.create_test_block(success_txs)])
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

         self.log.info("Test version 2 txs")
@@ -428,14 +428,14 @@
         success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress)
                             for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)])
         # Test #4
-        self.sync_blocks([self.create_test_block(success_txs)])
+        self.send_blocks([self.create_test_block(success_txs)])
         self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

         # 1 more version 4 block to get us to height 575 so the fork should
         # now be active for the next block
         test_blocks = self.generate_blocks(1)
         # Test #5
-        self.sync_blocks(test_blocks)
+        self.send_blocks(test_blocks)
         assert_equal(get_csv_status(self.nodes[0]), False)

         self.nodes[0].generate(1)
@@ -455,7 +455,7 @@
         bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
         for bip113tx in [bip113signed1, bip113signed2]:
             # Test #6, Test #7
-            self.sync_blocks(
+            self.send_blocks(
                 [self.create_test_block([bip113tx])], success=False)

         # BIP 113 tests should now pass if the locktime is < MTP
@@ -467,13 +467,13 @@
         bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2)
         for bip113tx in [bip113signed1, bip113signed2]:
             # Test #8, Test #9
-            self.sync_blocks([self.create_test_block([bip113tx])])
+            self.send_blocks([self.create_test_block([bip113tx])])
             self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

         # Next block height = 580 after 4 blocks of random version
         test_blocks = self.generate_blocks(4)
         # Test #10
-        self.sync_blocks(test_blocks)
+        self.send_blocks(test_blocks)

         self.log.info("BIP 68 tests")
self.log.info("Test version 1 txs - all should still pass") @@ -481,14 +481,14 @@ success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) # Test #11 - self.sync_blocks([self.create_test_block(success_txs)]) + self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] - self.sync_blocks([self.create_test_block(bip68success_txs)]) + self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and @@ -497,38 +497,38 @@ for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']] for tx in bip68timetxs: # Test #13 - Test #16 - self.sync_blocks([self.create_test_block([tx])], success=False) + self.send_blocks([self.create_test_block([tx])], success=False) bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']] for tx in bip68heighttxs: # Test #17 - Test #20 - self.sync_blocks([self.create_test_block([tx])], success=False) + self.send_blocks([self.create_test_block([tx])], success=False) # Advance one block to 581 test_blocks = self.generate_blocks(1) # Test #21 - self.sync_blocks(test_blocks,) + self.send_blocks(test_blocks,) # Height txs should fail and time txs should now pass 9 * 600 > 10 * # 512 bip68success_txs.extend(bip68timetxs) # Test #22 - self.sync_blocks([self.create_test_block(bip68success_txs)]) + self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: # Test #23 - Test #26 - self.sync_blocks([self.create_test_block([tx])], success=False) + self.send_blocks([self.create_test_block([tx])], success=False) # Advance one block to 582 test_blocks = self.generate_blocks(1) # Test #27 - self.sync_blocks(test_blocks) + self.send_blocks(test_blocks) # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) # Test #28 - self.sync_blocks([self.create_test_block(bip68success_txs)]) + self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("BIP 112 tests") @@ -536,7 +536,7 @@ # -1 OP_CSV tx should fail # Test #29 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [bip112tx_special_v1])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, @@ -546,7 +546,7 @@ success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']] # Test #30 - self.sync_blocks( + self.send_blocks( [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) @@ -560,14 +560,14 @@ for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] for tx in fail_txs: # Test #31 - Test #78 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) self.log.info("Test version 2 txs") # -1 OP_CSV tx should fail # Test #79 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [bip112tx_special_v2])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, @@ -578,7 +578,7 @@ for tx in bip112txs_vary_OP_CSV_9_v2 if 
tx['sdf']] # Test #80 - self.sync_blocks( + self.send_blocks( [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) @@ -592,7 +592,7 @@ for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']] for tx in fail_txs: # Test #81 - Test #104 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail @@ -600,7 +600,7 @@ for tx in bip112txs_vary_nSequence_v2 if tx['sdf']] for tx in fail_txs: # Test #105 - Test #112 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # If sequencelock types mismatch, tx should fail @@ -610,7 +610,7 @@ for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']] for tx in fail_txs: # Test #113 - Test #120 - self.sync_blocks([self.create_test_block_spend_utxos( + self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # Remaining txs should pass, just test masking works properly @@ -619,7 +619,7 @@ success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']] # Test #121 - self.sync_blocks([self.create_test_block(success_txs)]) + self.send_blocks([self.create_test_block(success_txs)]) # Spending the previous block utxos requires a difference of 10 blocks (nSequence = 10). # Generate 9 blocks then spend in the 10th @@ -628,7 +628,7 @@ self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #122 - self.sync_blocks(self.generate_blocks(9)) + self.send_blocks(self.generate_blocks(9)) spend_txs = [] for tx in success_txs: @@ -637,7 +637,7 @@ raw_tx.rehash() spend_txs.append(raw_tx) # Test #123 - self.sync_blocks([self.create_test_block(spend_txs)]) + self.send_blocks([self.create_test_block(spend_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test, of checking that comparison of two time types works @@ -649,7 +649,7 @@ time_txs.append(signtx) # Test #124 - self.sync_blocks([self.create_test_block(time_txs)]) + self.send_blocks([self.create_test_block(time_txs)]) # Spending the previous block utxos requires a block time difference of # at least 10 * 512s (nSequence = 10). @@ -659,7 +659,7 @@ self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #125 - self.sync_blocks(self.generate_blocks(8)) + self.send_blocks(self.generate_blocks(8)) spend_txs = [] for tx in time_txs: @@ -668,7 +668,7 @@ raw_tx.rehash() spend_txs.append(raw_tx) # Test #126 - self.sync_blocks([self.create_test_block(spend_txs)]) + self.send_blocks([self.create_test_block(spend_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # TODO: Test empty stack fails