From 49d13674e1497a514ec1bff28d584a192e69efdd Mon Sep 17 00:00:00 2001 From: davide3011 Date: Fri, 6 Feb 2026 19:43:10 +0100 Subject: [PATCH] validation,policy,test: activate taproot flags and harden wallet fs checks Wire Taproot script verification into validation/policy/libconsensus and update standardness flags. Rework feature_pruning and test_framework/util to mine large blocks from wallet-built transactions with a bounded generate maxtries, and update p2p_segwit and rpc_blockchain expectations for an active taproot deployment. Adapt wallet_multiwallet functional test to match filesystem error variants (create_directory/create_directories/filesystem error), making the suite robust across fs backends and library versions. --- src/policy/policy.cpp | 46 ++++++++++++++- src/policy/policy.h | 12 +++- src/script/palladiumconsensus.cpp | 2 +- src/script/palladiumconsensus.h | 4 +- src/validation.cpp | 10 ++++ test/functional/feature_pruning.py | 78 +++++++++----------------- test/functional/p2p_segwit.py | 9 ++- test/functional/rpc_blockchain.py | 49 ++++++++-------- test/functional/test_framework/util.py | 13 +++-- test/functional/wallet_multiwallet.py | 4 +- 10 files changed, 144 insertions(+), 83 deletions(-) diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 7d75248..06b132b 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -199,8 +199,9 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) // get the scriptPubKey corresponding to this input: CScript prevScript = prev.scriptPubKey; + const bool is_p2sh = prevScript.IsPayToScriptHash(); - if (prevScript.IsPayToScriptHash()) { + if (is_p2sh) { std::vector<std::vector<unsigned char> > stack; // If the scriptPubKey is P2SH, we try to extract the redeemScript casually by converting the scriptSig // into a stack. We do not check IsPushOnly nor compare the hash as these will be done later anyway.
@@ -230,6 +231,49 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) if (tx.vin[i].scriptWitness.stack[j].size() > MAX_STANDARD_P2WSH_STACK_ITEM_SIZE) return false; } + } else if (witnessversion == 1 && witnessprogram.size() == WITNESS_V1_TAPROOT_SIZE) { + // Taproot spends cannot be wrapped inside P2SH and must have at least one witness element. + if (is_p2sh || tx.vin[i].scriptWitness.stack.empty()) { + return false; + } + + const auto& witness_stack = tx.vin[i].scriptWitness.stack; + const bool has_annex = witness_stack.size() >= 2 && !witness_stack.back().empty() && witness_stack.back()[0] == 0x50; + const size_t non_annex_items = witness_stack.size() - (has_annex ? 1 : 0); + if (non_annex_items == 0) { + return false; + } + + // Key path spends have one witness element after optional annex stripping. + if (non_annex_items == 1) { + continue; + } + + // Script path spend policy checks. + const std::vector<unsigned char>& control_block = witness_stack[non_annex_items - 1]; + if (control_block.size() < TAPROOT_CONTROL_BASE_SIZE || + control_block.size() > TAPROOT_CONTROL_MAX_SIZE || + ((control_block.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { + return false; + } + if ((control_block[0] & TAPROOT_LEAF_MASK) != TAPROOT_LEAF_TAPSCRIPT) { + return false; + } + + const std::vector<unsigned char>& tapscript = witness_stack[non_annex_items - 2]; + if (tapscript.size() > MAX_STANDARD_TAPSCRIPT_SCRIPT_SIZE) { + return false; + } + + const size_t witness_arg_count = non_annex_items - 2; + if (witness_arg_count > MAX_STANDARD_TAPSCRIPT_STACK_ITEMS) { + return false; + } + for (size_t j = 0; j < witness_arg_count; ++j) { + if (witness_stack[j].size() > MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE) { + return false; + } + } } } return true; diff --git a/src/policy/policy.h b/src/policy/policy.h index 17e494f..2cc9495 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -42,6 +42,12 @@ static const unsigned int MAX_STANDARD_P2WSH_STACK_ITEMS
= 100; static const unsigned int MAX_STANDARD_P2WSH_STACK_ITEM_SIZE = 80; /** The maximum size of a standard witnessScript */ static const unsigned int MAX_STANDARD_P2WSH_SCRIPT_SIZE = 3600; +/** The maximum number of witness stack items in a standard Tapscript path spend */ +static const unsigned int MAX_STANDARD_TAPSCRIPT_STACK_ITEMS = 100; +/** The maximum size of each witness stack item in a standard Tapscript path spend */ +static const unsigned int MAX_STANDARD_TAPSCRIPT_STACK_ITEM_SIZE = 80; +/** The maximum size of a standard tapscript */ +static const unsigned int MAX_STANDARD_TAPSCRIPT_SCRIPT_SIZE = 3600; /** Min feerate for defining dust. Historically this has been based on the * minRelayTxFee, however changing the dust limit changes which transactions are * standard and should be done with care and ideally rarely. It makes sense to @@ -68,7 +74,11 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS = MANDATORY_SCRIPT_VE SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM | SCRIPT_VERIFY_WITNESS_PUBKEYTYPE | - SCRIPT_VERIFY_CONST_SCRIPTCODE; + SCRIPT_VERIFY_CONST_SCRIPTCODE | + SCRIPT_VERIFY_TAPROOT | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | + SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE; /** For convenience, standard but not mandatory verify flags. 
*/ static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS = STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS; diff --git a/src/script/palladiumconsensus.cpp b/src/script/palladiumconsensus.cpp index 2cf2e00..9f62f31 100644 --- a/src/script/palladiumconsensus.cpp +++ b/src/script/palladiumconsensus.cpp @@ -114,7 +114,7 @@ int palladiumconsensus_verify_script(const unsigned char *scriptPubKey, unsigned const unsigned char *txTo , unsigned int txToLen, unsigned int nIn, unsigned int flags, palladiumconsensus_error* err) { - if (flags & palladiumconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) { + if (flags & (palladiumconsensus_SCRIPT_FLAGS_VERIFY_WITNESS | palladiumconsensus_SCRIPT_FLAGS_VERIFY_TAPROOT)) { return set_error(err, palladiumconsensus_ERR_AMOUNT_REQUIRED); } diff --git a/src/script/palladiumconsensus.h b/src/script/palladiumconsensus.h index 07bc6d2..f1b8164 100644 --- a/src/script/palladiumconsensus.h +++ b/src/script/palladiumconsensus.h @@ -55,9 +55,11 @@ enum palladiumconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY = (1U << 9), // enable CHECKLOCKTIMEVERIFY (BIP65) palladiumconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY = (1U << 10), // enable CHECKSEQUENCEVERIFY (BIP112) palladiumconsensus_SCRIPT_FLAGS_VERIFY_WITNESS = (1U << 11), // enable WITNESS (BIP141) + palladiumconsensus_SCRIPT_FLAGS_VERIFY_TAPROOT = (1U << 17), // enable TAPROOT (BIP341/BIP342) palladiumconsensus_SCRIPT_FLAGS_VERIFY_ALL = palladiumconsensus_SCRIPT_FLAGS_VERIFY_P2SH | palladiumconsensus_SCRIPT_FLAGS_VERIFY_DERSIG | palladiumconsensus_SCRIPT_FLAGS_VERIFY_NULLDUMMY | palladiumconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY | - palladiumconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY | palladiumconsensus_SCRIPT_FLAGS_VERIFY_WITNESS + palladiumconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY | palladiumconsensus_SCRIPT_FLAGS_VERIFY_WITNESS | + palladiumconsensus_SCRIPT_FLAGS_VERIFY_TAPROOT }; /// Returns 1 if the input nIn of the serialized transaction 
pointed to by diff --git a/src/validation.cpp b/src/validation.cpp index bd12a8c..039b395 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1842,6 +1842,11 @@ static bool IsScriptWitnessEnabled(const Consensus::Params& params) return params.SegwitHeight != std::numeric_limits<int>::max(); } +static bool IsTaprootEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params) +{ + return VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE; +} + static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); @@ -1885,6 +1890,11 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens flags |= SCRIPT_VERIFY_NULLDUMMY; } + // Enforce BIP341/BIP342 (taproot/tapscript) once the deployment is active. + if ((flags & SCRIPT_VERIFY_WITNESS) && IsTaprootEnabled(pindex->pprev, consensusparams)) { + flags |= SCRIPT_VERIFY_TAPROOT; + } + return flags; } diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 5c8eaac..5f24367 100644 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -10,16 +10,15 @@ This test takes 30 mins or more (up to 2 hours) """ import os -from test_framework.blocktools import create_coinbase -from test_framework.messages import CBlock, ToHex -from test_framework.script import CScript, OP_RETURN, OP_NOP from test_framework.test_framework import PalladiumTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, + create_lots_of_big_transactions, connect_nodes, disconnect_nodes, + gen_return_txouts, wait_until, ) @@ -27,49 +26,28 @@ from test_framework.util import ( # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60 +MAX_GENERATE_TRIES = 100000000 + +def generate_blocks(node, n): + return node.generate(n, maxtries=MAX_GENERATE_TRIES) def mine_large_blocks(node, n): - # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN - # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase - # transaction but is consensus valid. - - # Set the nTime if this is the first time this function has been called. - # A static variable ensures that time is monotonicly increasing and is therefore - # different for each block created => blockhash is unique. - if "nTimes" not in mine_large_blocks.__dict__: - mine_large_blocks.nTime = 0 - - # Get the block parameters for the first block - big_script = CScript([OP_RETURN] + [OP_NOP] * 950000) - best_block = node.getblock(node.getbestblockhash()) - height = int(best_block["height"]) + 1 - mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1 - previousblockhash = int(best_block["hash"], 16) + # Mine "large" blocks using many ~66k transactions and built-in mining. + # Use a larger tx count than the generic helper to keep prune thresholds + # aligned with Palladium's block/weight behavior. 
+ txouts = gen_return_txouts() + utxos = [] + fee = 100 * node.getnetworkinfo()["relayfee"] + tx_per_block = 24 for _ in range(n): - # Build the coinbase transaction (with large scriptPubKey) - coinbase_tx = create_coinbase(height) - coinbase_tx.vin[0].nSequence = 2 ** 32 - 1 - coinbase_tx.vout[0].scriptPubKey = big_script - coinbase_tx.rehash() - - # Build the block - block = CBlock() - block.nVersion = best_block["version"] - block.hashPrevBlock = previousblockhash - block.nTime = mine_large_blocks.nTime - block.nBits = int('207fffff', 16) - block.nNonce = 0 - block.vtx = [coinbase_tx] - block.hashMerkleRoot = block.calc_merkle_root() - block.solve() - - # Submit to the node - node.submitblock(ToHex(block)) - - previousblockhash = block.sha256 - height += 1 - mine_large_blocks.nTime += 1 + if len(utxos) < tx_per_block: + utxos.clear() + utxos.extend(node.listunspent()) + if not utxos: + raise AssertionError("No spendable UTXOs available for large-block mining") + create_lots_of_big_transactions(node, txouts, utxos, min(tx_per_block, len(utxos)), fee=fee) + generate_blocks(node, 1) def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.) @@ -82,7 +60,7 @@ class PruneTest(PalladiumTestFramework): # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. 
- self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"] + self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5", "-acceptnonstdtxn=1"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [ @@ -118,12 +96,12 @@ class PruneTest(PalladiumTestFramework): def create_big_chain(self): # Start by creating some coinbases we can spend later - self.nodes[1].generate(200) + generate_blocks(self.nodes[1], 200) self.sync_blocks(self.nodes[0:2]) - self.nodes[0].generate(150) + generate_blocks(self.nodes[0], 200) # Then mine enough full blocks to create more than 550MiB of data - mine_large_blocks(self.nodes[0], 645) + mine_large_blocks(self.nodes[0], 595) self.sync_blocks(self.nodes[0:5]) @@ -193,7 +171,7 @@ class PruneTest(PalladiumTestFramework): disconnect_nodes(self.nodes[1], 2) self.log.info("Generating new longer chain of 300 more blocks") - self.nodes[1].generate(300) + generate_blocks(self.nodes[1], 300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], 1) @@ -245,7 +223,7 @@ class PruneTest(PalladiumTestFramework): self.nodes[0].invalidateblock(curchainhash) assert_equal(self.nodes[0].getblockcount(), self.mainchainheight) assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2) - goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] + goalbesthash = generate_blocks(self.nodes[0], blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") @@ -289,7 +267,7 @@ class PruneTest(PalladiumTestFramework): assert_equal(block1_details["nTx"], len(block1_details["tx"])) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) - node.generate(6) + generate_blocks(node, 6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # Pruned block should still know the 
number of transactions @@ -320,7 +298,7 @@ class PruneTest(PalladiumTestFramework): assert has_block(2), "blk00002.dat is still there, should be pruned by now" # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat) - node.generate(288) + generate_blocks(node, 288) prune(1000) assert not has_block(2), "blk00002.dat is still there, should be pruned by now" assert not has_block(3), "blk00003.dat is still there, should be pruned by now" diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 23137c8..f8fd14a 100644 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -50,6 +50,7 @@ from test_framework.script import ( MAX_SCRIPT_ELEMENT_SIZE, OP_0, OP_1, + OP_2, OP_16, OP_2DROP, OP_CHECKMULTISIG, @@ -1377,12 +1378,15 @@ class SegWitTest(PalladiumTestFramework): self.utxo.append(UTXO(tx.sha256, i, split_value)) self.sync_blocks() + taproot_active = self.nodes[0].getblockchaininfo().get('softforks', {}).get('taproot', {}).get('active', False) temp_utxo = [] tx = CTransaction() witness_program = CScript([OP_TRUE]) witness_hash = sha256(witness_program) assert_equal(len(self.nodes[1].getrawmempool()), 0) - for version in list(range(OP_1, OP_16 + 1)) + [OP_0]: + # When taproot is active, OP_1 is no longer a "future" witness version. + future_versions = list(range(OP_2, OP_16 + 1)) if taproot_active else list(range(OP_1, OP_16 + 1)) + for version in future_versions + [OP_0]: # First try to spend to a future version segwit script_pubkey. 
script_pubkey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] @@ -1410,7 +1414,8 @@ class SegWitTest(PalladiumTestFramework): test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True) temp_utxo.pop() # last entry in temp_utxo was the output we just spent - temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + if not taproot_active: + temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) # Spend everything in temp_utxo back to an OP_TRUE output. tx3 = CTransaction() diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index f9471e0..3e9ffaf 100644 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -124,31 +124,36 @@ class BlockchainTest(PalladiumTestFramework): assert_equal(res['prune_target_size'], 576716800) assert_greater_than(res['size_on_disk'], 0) - assert_equal(res['softforks'], { - 'bip34': {'type': 'buried', 'active': True, 'height': 0}, - 'bip66': {'type': 'buried', 'active': True, 'height': 0}, - 'bip65': {'type': 'buried', 'active': True, 'height': 0}, - 'csv': {'type': 'buried', 'active': True, 'height': 0}, - 'segwit': {'type': 'buried', 'active': True, 'height': 0}, - 'testdummy': { - 'type': 'bip9', - 'bip9': { - 'status': 'started', - 'bit': 28, - 'start_time': 0, - 'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value - 'since': 144, - 'statistics': { - 'period': 144, - 'threshold': 108, - 'elapsed': 57, - 'count': 57, - 'possible': True, - }, + softforks = res['softforks'] + assert_equal(softforks['bip34'], {'type': 'buried', 'active': True, 'height': 0}) + assert_equal(softforks['bip66'], {'type': 'buried', 'active': True, 'height': 0}) + assert_equal(softforks['bip65'], {'type': 'buried', 'active': True, 'height': 0}) + 
assert_equal(softforks['csv'], {'type': 'buried', 'active': True, 'height': 0}) + assert_equal(softforks['segwit'], {'type': 'buried', 'active': True, 'height': 0}) + + assert_equal(softforks['testdummy'], { + 'type': 'bip9', + 'bip9': { + 'status': 'started', + 'bit': 28, + 'start_time': 0, + 'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value + 'since': 144, + 'statistics': { + 'period': 144, + 'threshold': 108, + 'elapsed': 57, + 'count': 57, + 'possible': True, }, - 'active': False} + }, + 'active': False }) + assert 'taproot' in softforks + assert_equal(softforks['taproot']['type'], 'bip9') + assert_equal(softforks['taproot']['active'], True) + def _test_getchaintxstats(self): self.log.info("Test getchaintxstats") diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 0ac68c6..e9b0746 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -242,6 +242,7 @@ def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=N # The maximum number of nodes a single test can spawn MAX_NODES = 12 +MAX_GENERATE_TRIES = 100000000 # Don't assign rpc or p2p ports lower than this PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000)) # The number of ports to "reserve" for p2p and rpc, each @@ -506,8 +507,8 @@ def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): def create_confirmed_utxos(fee, node, count): to_generate = int(0.5 * count) + COINBASE_MATURITY + 1 while to_generate > 0: - node.generate(min(25, to_generate)) - to_generate -= 25 + generated = node.generate(min(25, to_generate), maxtries=MAX_GENERATE_TRIES) + to_generate -= len(generated) utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() @@ -515,6 +516,10 @@ def create_confirmed_utxos(fee, node, count): if iterations <= 0: return utxos for i in range(iterations): + if not utxos: + 
utxos.extend(node.listunspent()) + if not utxos: + raise AssertionError("Insufficient confirmed UTXOs while creating test inputs") t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) @@ -527,7 +532,7 @@ def create_confirmed_utxos(fee, node, count): node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): - node.generate(1) + node.generate(1, maxtries=MAX_GENERATE_TRIES) utxos = node.listunspent() assert len(utxos) >= count @@ -586,7 +591,7 @@ def mine_large_block(node, utxos=None): utxos.extend(node.listunspent()) fee = 100 * node.getnetworkinfo()["relayfee"] create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee) - node.generate(1) + node.generate(1, maxtries=MAX_GENERATE_TRIES) def find_vout_for_address(node, txid, addr): """ diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 4236580..45981a6 100644 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -110,7 +110,9 @@ class MultiWalletTest(PalladiumTestFramework): assert_equal(os.path.isfile(wallet_file(wallet_name)), True) # should not initialize if wallet path can't be created - exp_stderr = "boost::filesystem::create_directory:" + # Message wording depends on filesystem backend/library versions: + # boost may report create_directory/create_directories, std::filesystem uses "filesystem error". + exp_stderr = r"(boost::filesystem::create_director(?:y|ies):|filesystem error:)" self.nodes[0].assert_start_raises_init_error(['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')