
diff --git a/.arclint b/.arclint
index 716247246..f0a7d2879 100644
--- a/.arclint
+++ b/.arclint
@@ -1,329 +1,332 @@
{
"linters": {
"generated": {
"type": "generated"
},
"clang-format": {
"type": "clang-format",
"version": ">=16.0",
"bin": [
"clang-format-16",
"clang-format"
],
"include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"black": {
"type": "black",
"version": ">=24.0.0",
"include": [
"(\\.py$)",
"(^electrum/electrum-abc$)"
],
"exclude": [
"(^contrib/apple-sdk-tools/)",
"(^electrum/electrumabc_gui/qt/icons.py)",
"(\\_pb2.py$)"
]
},
"flake8": {
"type": "flake8",
"version": ">=5.0",
"include": [
"(\\.py$)",
"(^electrum/electrum-abc$)"
],
"exclude": [
"(^contrib/apple-sdk-tools/)",
"(^electrum/electrumabc_gui/qt/icons.py)",
"(\\_pb2.py$)"
],
"flags": [
"--ignore=A003,E203,E303,E305,E501,E704,W503,W504",
"--require-plugins=flake8-comprehensions,flake8-builtins"
]
},
"lint-format-strings": {
"type": "lint-format-strings",
"include": "(^(src|chronik)/.*\\.(h|c|cpp)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)",
"(^src/test/fuzz/strprintf.cpp$)"
]
},
"check-doc": {
"type": "check-doc",
"include": "(^(src|chronik)/.*\\.(h|c|cpp)$)"
},
"lint-tests": {
"type": "lint-tests",
"include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)"
},
"phpcs": {
"type": "phpcs",
"include": "(\\.php$)",
"exclude": [
"(^arcanist/__phutil_library_.+\\.php$)"
],
"phpcs.standard": "arcanist/phpcs.xml"
},
"lint-locale-dependence": {
"type": "lint-locale-dependence",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)",
"exclude": [
"(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h))",
"(^src/bench/nanobench.h$)"
]
},
"lint-cheader": {
"type": "lint-cheader",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)",
"exclude": [
"(^src/(crypto/ctaes|secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"spelling": {
"type": "spelling",
"exclude": [
"(^build-aux/m4/)",
"(^depends/)",
"(^doc/release-notes/)",
"(^src/(qt/locale|secp256k1|leveldb)/)",
"(^test/lint/dictionary/)",
"(^web/e.cash/public/animations/)",
"(package-lock.json)",
- "(^electrum/electrumabc/wordlist/)"
+ "(^electrum/electrumabc/wordlist/)",
+ "(^doc/standards/)"
],
"spelling.dictionaries": [
"test/lint/dictionary/english.json"
]
},
"lint-assert-with-side-effects": {
"type": "lint-assert-with-side-effects",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"lint-include-quotes": {
"type": "lint-include-quotes",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"lint-include-guard": {
"type": "lint-include-guard",
"include": "(^(src|chronik)/.*\\.h$)",
"exclude": [
"(^src/(crypto/ctaes|secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)",
"(^src/tinyformat.h$)"
]
},
"lint-include-source": {
"type": "lint-include-source",
"include": "(^(src|chronik)/.*\\.(h|c|cpp)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"lint-std-chrono": {
"type": "lint-std-chrono",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)"
},
"lint-stdint": {
"type": "lint-stdint",
"include": "(^(src|chronik)/.*\\.(h|c|cpp)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)",
"(^src/compat/assumptions.h$)"
]
},
"check-files": {
"type": "check-files"
},
"lint-boost-dependencies": {
"type": "lint-boost-dependencies",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)"
},
"lint-python-encoding": {
"type": "lint-python-encoding",
"include": "(\\.py$)",
"exclude": [
"(^contrib/apple-sdk-tools/)"
]
},
"shellcheck": {
"type": "shellcheck",
"version": ">=0.7.0",
"flags": [
"--external-sources",
"--source-path=SCRIPTDIR"
],
"include": "(\\.(sh|bash)$)",
"exclude": [
"(^src/(secp256k1)/)",
"(^electrum/)"
]
},
"lint-shell-locale": {
"type": "lint-shell-locale",
"include": "(\\.(sh|bash)$)",
"exclude": [
"(^src/(secp256k1)/)",
"(^cmake/utils/log-and-print-on-failure.sh)"
]
},
"lint-cpp-void-parameters": {
"type": "lint-cpp-void-parameters",
"include": "(^(src|chronik)/.*\\.(h|cpp)$)",
"exclude": [
"(^src/(crypto/ctaes|secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)",
"(^src/compat/glibc_compat.cpp$)"
]
},
"lint-logs": {
"type": "lint-logs",
"include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)"
},
"lint-qt": {
"type": "lint-qt",
"include": "(^src/qt/.*\\.(h|cpp)$)",
"exclude": [
"(^src/qt/(locale|forms|res)/)"
]
},
"lint-doxygen": {
"type": "lint-doxygen",
"include": "(^(src|chronik)/.*\\.(h|c|cpp)$)",
"exclude": [
"(^src/(crypto/ctaes|secp256k1|leveldb)/)",
"(^src/bench/nanobench.h$)"
]
},
"lint-whitespace": {
"type": "lint-whitespace",
"include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)",
"exclude": [
"(^src/(secp256k1|leveldb)/)",
- "(^src/bench/nanobench.h$)"
+ "(^src/bench/nanobench.h$)",
+ "(^doc/standards/)"
]
},
"yamllint": {
"type": "yamllint",
"include": "(\\.(yml|yaml)$)",
"exclude": "(^src/(secp256k1|leveldb)/)"
},
"lint-check-nonfatal": {
"type": "lint-check-nonfatal",
"include": [
"(^src/rpc/.*\\.(h|c|cpp)$)",
"(^src/wallet/rpc*.*\\.(h|c|cpp)$)"
],
"exclude": "(^src/rpc/server.cpp)"
},
"lint-markdown": {
"type": "lint-markdown",
"include": [
"(\\.md$)"
],
"exclude": [
- "(^web/chronik.e.cash/)"
+ "(^web/chronik.e.cash/)",
+ "(^doc/standards/)"
]
},
"lint-python-mypy": {
"type": "lint-python-mypy",
"version": ">=0.910",
"include": "(\\.py$)",
"exclude": [
"(^contrib/apple-sdk-tools/)",
"(^contrib/macdeploy/)",
"(^electrum/)"
],
"flags": [
"--ignore-missing-imports",
"--install-types",
"--non-interactive"
]
},
"lint-python-mutable-default": {
"type": "lint-python-mutable-default",
"include": "(\\.py$)",
"exclude": [
"(^contrib/apple-sdk-tools/)"
]
},
"prettier": {
"type": "prettier",
"version": ">=2.6.0",
"include": [
"(^apps/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)",
- "(^doc/standards/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)",
+ "(^doc/standards/.*\\.(css|html|js|json|jsx|scss|ts|tsx)$)",
"(^cashtab/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)",
"(^modules/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)",
"(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)"
],
"exclude": [
"(^web/.*/translations/.*\\.json$)",
"(^web/e.cash/public/animations/)",
"(^web/explorer/explorer-server/templates/)"
]
},
"lint-python-isort": {
"type": "lint-python-isort",
"version": ">=5.6.4",
"include": [
"(\\.py$)",
"(^electrum/electrum-abc$)"
],
"exclude": [
"(^contrib/apple-sdk-tools/)",
"(^electrum/electrumabc_gui/qt/icons.py)",
"(\\_pb2.py$)"
]
},
"rustfmt": {
"type": "rustfmt",
"version": ">=1.5.1",
"include": "(\\.rs$)"
},
"djlint": {
"type": "djlint",
"version": ">=1.34.1",
"include": "(^web/explorer/explorer-server/templates/)"
},
"eslint": {
"type": "eslint",
"version": ">=8.0.0",
"include": [
"(cashtab/.*\\.(js|jsx|ts|tsx)$)",
"(apps/token-server/.*\\.ts$)",
"(apps/alias-server/.*\\.js$)",
"(modules/ecashaddrjs/.*\\.ts$)",
"(apps/ecash-herald/.*\\.ts$)",
"(modules/chronik-client/.*\\.(js|jsx|ts|tsx)$)",
"(^web/e.cash/.*\\.js$)"
]
},
"lint-python-flynt": {
"type": "lint-python-flynt",
"version": ">=0.78",
"include": "(\\.py$)",
"exclude": [
"(^contrib/apple-sdk-tools/)",
"(^electrum/)"
]
}
}
}
diff --git a/doc/standards/2018-nov-upgrade.md b/doc/standards/2018-nov-upgrade.md
new file mode 100644
index 000000000..ffa4aeea5
--- /dev/null
+++ b/doc/standards/2018-nov-upgrade.md
@@ -0,0 +1,66 @@
+---
+layout: specification
+title: 2018 November 15 Network Upgrade Specification
+date: 2018-10-10
+category: spec
+activation: 1542300000
+version: 0.5
+---
+
+## Summary
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1542300000, Bitcoin Cash will execute an upgrade of the network consensus rules according to this specification. Starting from the next block, these consensus rule changes will take effect:
+
+* Remove topological transaction order constraint, and enforce canonical transaction order
+* Enable OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY opcodes
+* Enforce minimum transaction size
+* Enforce "push only" rule for scriptSig
+* Enforce "clean stack" rule
+
+The following are not consensus changes, but are recommended changes for Bitcoin Cash implementations:
+
+* Automatic replay protection for future upgrade
+
+## Canonical Transaction Order
+
+With the exception of the coinbase transaction, transactions within a block MUST be sorted in numerically ascending order of the transaction id, interpreted as 256-bit little endian integers. The coinbase transaction MUST be the first transaction in a block.
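+
+As a concrete illustration, the ordering can be computed as in the following Python sketch (the function and argument names are ours, not part of the specification):
+
    # Hypothetical sketch of canonical transaction ordering (CTOR).
    def ctor_order(coinbase_txid: bytes, txids: list) -> list:
        # Interpret each raw 32-byte txid as a little-endian 256-bit integer
        # and sort ascending; the coinbase always stays first.
        ordered = sorted(txids, key=lambda t: int.from_bytes(t, 'little'))
        return [coinbase_txid] + ordered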
+
+## OpCodes
+
+New opcodes OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY will be enabled as specified in [op_checkdatasig.md](op_checkdatasig.md) [2].
+
+## Minimum Transaction Size
+
+Transactions that are smaller than 100 bytes shall be considered invalid. This protects against a Merkle tree vulnerability that allows attackers to spoof transactions against SPV wallets [3].
+
+## Push Only
+
+Transactions shall be considered invalid if an opcode with number greater than 96 (hex encoding 0x60) appears in a scriptSig. This is the same as Bitcoin BIP 62 rule #2 [4].
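+
+A minimal Python sketch of this check (our own helper, assuming a well-formed script; truncated scripts are invalid for other reasons):
+
    # Hypothetical "push only" test: scan opcodes, skipping over pushed data.
    def is_push_only(script: bytes) -> bool:
        i = 0
        while i < len(script):
            op = script[i]
            i += 1
            if op > 0x60:                 # any opcode above 96 (0x60) fails
                return False
            if 1 <= op <= 75:             # direct push of `op` bytes
                i += op
            elif op == 76:                # OP_PUSHDATA1: one-byte length
                i += 1 + script[i]
            elif op == 77:                # OP_PUSHDATA2: two-byte length
                i += 2 + int.from_bytes(script[i:i+2], 'little')
            elif op == 78:                # OP_PUSHDATA4: four-byte length
                i += 4 + int.from_bytes(script[i:i+4], 'little')
        return True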
+
+## Clean Stack
+
+For a transaction to be valid, only a single non-zero item must remain on the stack upon completion of Script evaluation. If any extra data elements remain on the stack, the script evaluates to false. This is the same as Bitcoin BIP 62 rule #6 [4].
+
+## Automatic Replay Protection
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is less than UNIX timestamp 1557921600 (May 2019 upgrade) Bitcoin Cash full nodes MUST enforce the following rule:
+
+ * `forkid` [5] to be equal to 0.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1557921600 (May 2019 upgrade) Bitcoin Cash full nodes implementing the November 2018 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid` [5] to be equal to 0xFF0001. ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software. Wallets that follow the upgrade should not have to change anything.
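+
+A sketch of the recommended node-side logic (constant and function names are ours):
+
    # Hypothetical helper: which forkid a November 2018 node uses.
    MAY_2019_ACTIVATION = 1557921600

    def required_forkid(mtp_11: int) -> int:
        return 0 if mtp_11 < MAY_2019_ACTIVATION else 0xFF0001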
+
+## References
+
+[1] Median Time Past is described in [bitcoin.it wiki](https://en.bitcoin.it/wiki/Block_timestamp). It is guaranteed by consensus rules to be monotonically increasing.
+
+[2] https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/op_checkdatasig.md
+
+[3] [Leaf-Node weakness in Bitcoin Merkle Tree Design](https://bitslog.wordpress.com/2018/06/09/leaf-node-weakness-in-bitcoin-merkle-tree-design/)
+
+[4] [BIP 62](https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki)
+
+[5] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/2019-05-15-schnorr.md b/doc/standards/2019-05-15-schnorr.md
new file mode 100644
index 000000000..7449c6622
--- /dev/null
+++ b/doc/standards/2019-05-15-schnorr.md
@@ -0,0 +1,220 @@
+---
+layout: specification
+title: 2019-MAY-15 Schnorr Signature specification
+date: 2019-02-15
+category: spec
+activation: 1557921600
+version: 0.5
+author: Mark B. Lundeberg
+---
+
+# Summary
+
+Four script opcodes that verify single ECDSA signatures will be overloaded to also accept Schnorr signatures:
+
+* `OP_CHECKSIG`, `OP_CHECKSIGVERIFY`
+* `OP_CHECKDATASIG`, `OP_CHECKDATASIGVERIFY`
+
+The other two ECDSA opcodes, `OP_CHECKMULTISIG` and `OP_CHECKMULTISIGVERIFY`, will *not* be upgraded to allow Schnorr signatures and in fact will be modified to refuse Schnorr-sized signatures.
+
+ * [Summary](#summary)
+ * [Motivation](#motivation)
+ * [Specification](#specification)
+ * [Public keys](#public-keys)
+ * [Signature verification algorithm](#signature-verification-algorithm)
+ * [Message m calculation](#message-m-calculation)
+ * [OP_CHECKMULTISIG/VERIFY](#op_checkmultisigverify)
+ * [Recommended practices for secure signature generation](#recommended-practices-for-secure-signature-generation)
+ * [Rationale and commentary on design decisions](#rationale-and-commentary-on-design-decisions)
+ * [Schnorr variant](#schnorr-variant)
+ * [Overloading of opcodes](#overloading-of-opcodes)
+ * [Re-use of keypair encodings](#re-use-of-keypair-encodings)
+ * [Non-inclusion of OP_CHECKMULTISIG](#non-inclusion-of-op_checkmultisig)
+ * [Lack of flag byte -- ECDSA / Schnorr ambiguity](#lack-of-flag-byte----ecdsa--schnorr-ambiguity)
+ * [Miscellaneous](#miscellaneous)
+ * [Acknowledgements](#acknowledgements)
+
+# Motivation
+
+(for more detail, see Motivation and Applications sections of [Pieter Wuille's Schnorr specification](https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr.mediawiki))
+
+Schnorr signatures have some slightly improved properties over the ECDSA signatures currently used in bitcoin:
+* Known cryptographic proof of security.
+* Proven that there are no unknown third-party malleability mechanisms.
+* Linearity allows some simple multi-party signature aggregation protocols. (compactness / privacy / malleability benefits)
+* Possibility to do batch validation, resulting in a slight speedup during validation of large transactions or initial block download.
+
+# Specification
+
+Current ECDSA opcodes accept DER signatures (format: `0x30 (N+M+4) 0x02 N <N bytes> 0x02 M <M bytes> [hashtype byte]`) from the stack. This upgrade will allow a Schnorr signature to be substituted in any place where an ECDSA DER signature is accepted. Schnorr signatures taken from stack will have the following 65-byte form for OP_CHECKSIG/VERIFY:
+
+| 32 bytes | 32 bytes | 1 byte |
+|----------|----------|-------------|
+| r | s | hashtype |
+
+and 64 bytes for OP_CHECKDATASIG/VERIFY:
+
+| 32 bytes | 32 bytes |
+|----------|----------|
+| r | s |
+
+* `r` is the unsigned big-endian 256-bit encoding of the Schnorr signature's *r* integer.
+* `s` is the unsigned big-endian 256-bit encoding of the Schnorr signature's *s* integer.
+* `hashtype` informs OP_CHECKSIG/VERIFY [mechanics](replay-protected-sighash.md).
+
+These constant length signatures can be contrasted to ECDSA signatures which have variable length (typically 71-72 bytes but in principle may be as short as 8 bytes).
+
+Upon activation, all 64-byte signatures passed to OP_CHECKDATASIG/VERIFY will be processed as Schnorr signatures, and all 65-byte signatures passed to OP_CHECKSIG/VERIFY will be processed as Schnorr signatures. 65-byte signatures passed to OP_CHECKMULTISIG/VERIFY will trigger script failure (see below for more details).
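+
+A sketch of the resulting length-based dispatch (names are ours; a real interpreter also enforces the encoding rules):
+
    # Hypothetical post-activation dispatch on signature size.
    def sig_kind(sig: bytes, opcode: str) -> str:
        if opcode in ("CHECKSIG", "CHECKSIGVERIFY") and len(sig) == 65:
            return "schnorr"                  # 64-byte signature + hashtype byte
        if opcode in ("CHECKDATASIG", "CHECKDATASIGVERIFY") and len(sig) == 64:
            return "schnorr"
        if opcode.startswith("CHECKMULTISIG") and len(sig) == 65:
            return "fail"                     # Schnorr-size sigs forbidden here
        return "ecdsa"                        # parsed as DER, as before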
+
+## Public keys
+
+All valid ECDSA public keys are also valid Schnorr public keys: compressed (starting byte 2 or 3) and uncompressed (starting byte 4), see [SEC1 §2.3.3](http://www.secg.org/sec1-v2.pdf#subsubsection.2.3.3). The formerly supported ECDSA hybrid keys (see [X9.62 §4.3.6](citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.202.2977&rep=rep1&type=pdf#page=22)) would also be valid, except that these have already been forbidden by the STRICTENC rule that was activated long ago on BCH.
+
+(Schnorr private keys are also identical to the ECDSA private keys.)
+
+## Signature verification algorithm
+
+We follow essentially what is an older variant of Pieter Wuille's [BIP-Schnorr](https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr.mediawiki). Notable design choices:
+
+* Operates on secp256k1 curve.
+* Uses (*R*,*s*) Schnorr variant, not (*e*,*s*) variant.
+* Uses pubkey-prefixing in computing the internal hash.
+* The Y coordinate of *R* is dropped, so just its X coordinate, *r*, is serialized. The Y coordinate is uniquely reconstructed from *r* by choosing the quadratic residue.
+* Unlike the currently proposed BIP-Schnorr, we use full public keys that do *not* have the Y coordinate removed; this distinction is maintained in the calculation of *e*, below, which makes the resulting signatures from the algorithms incompatible. We do this so that all existing keys can use Schnorr signatures, and both compressed and uncompressed keys are allowed as inputs (though are converted to compressed when calculating *e*).
+
+In detail, the Schnorr signature verification algorithm takes a message byte string `m`, public key point *P*, and nonnegative integers *r*, *s* as inputs, and does the following:
+
+1. Fail if point *P* is not actually on the curve, or if it is the point at infinity.
+2. Fail if *r* >= *p*, where *p* is the field size used in secp256k1.
+3. Fail if *s* >= *n*, where *n* is the order of the secp256k1 curve.
+4. Let `BP` be the 33-byte encoding of *P* as a compressed point.
+5. Let `Br` be the 32-byte encoding of *r* as an unsigned big-endian 256-bit integer.
+6. Compute integer *e* = *H*(`Br | BP | m`) mod *n*. Here `|` means byte-string concatenation and function *H*() takes the SHA256 hash of its 97-byte input and returns it decoded as a big-endian unsigned integer.
+7. Compute elliptic curve point *R*' = *sG* - *eP*, where *G* is the secp256k1 generator point.
+8. Fail if *R*' is the point at infinity.
+9. Fail if the X coordinate of *R*' is not equal to *r*.
+10. Fail if the Jacobi symbol of the Y coordinate of *R*' is not 1.
+11. Otherwise, the signature is valid.
+
+We stress that bytestring `BP` used in calculating *e* shall always be the *compressed* encoding of the public key, which is not necessarily the same as the encoding taken from stack (which could have been uncompressed).
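+
+For concreteness, here is a self-contained (and deliberately slow) Python sketch of steps 1-11 using textbook affine point arithmetic; all names are ours, and a production validator would instead use a hardened library such as libsecp256k1:
+
    # Hypothetical reference implementation of the verification algorithm.
    import hashlib

    P = 2**256 - 2**32 - 977  # secp256k1 field size p
    N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # order n
    G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
         0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)

    def add(a, b):  # affine point addition; None represents the point at infinity
        if a is None: return b
        if b is None: return a
        (x1, y1), (x2, y2) = a, b
        if x1 == x2 and (y1 + y2) % P == 0:
            return None
        if a == b:
            lam = 3 * x1 * x1 * pow(2 * y1, P - 2, P) % P
        else:
            lam = (y2 - y1) * pow(x2 - x1, P - 2, P) % P
        x3 = (lam * lam - x1 - x2) % P
        return x3, (lam * (x1 - x3) - y1) % P

    def mul(k, pt):  # double-and-add scalar multiplication
        acc = None
        while k:
            if k & 1:
                acc = add(acc, pt)
            pt = add(pt, pt)
            k >>= 1
        return acc

    def verify(pub, r, s, m):
        """pub: point (x, y) already checked to be on the curve (step 1);
        r, s: nonnegative integers; m: 32-byte message."""
        if r >= P or s >= N:                                     # steps 2-3
            return False
        BP = (b'\x02' if pub[1] % 2 == 0 else b'\x03') + pub[0].to_bytes(32, 'big')
        Br = r.to_bytes(32, 'big')                               # steps 4-5
        e = int.from_bytes(hashlib.sha256(Br + BP + m).digest(), 'big') % N  # step 6
        Rp = add(mul(s, G), mul(N - e, pub))                     # step 7: R' = sG - eP
        if Rp is None or Rp[0] != r:                             # steps 8-9
            return False
        return pow(Rp[1], (P - 1) // 2, P) == 1                  # step 10: Jacobi = 1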
+
+## Message `m` calculation
+
+In all cases, `m` is 32 bytes long.
+
+For OP_CHECKSIG/VERIFY, `m` is obtained according to the [sighash digest algorithm](replay-protected-sighash.md#digest-algorithm) as informed by the `hashtype` byte, and involves hashing **twice** with SHA256.
+
+For OP_CHECKDATASIG/VERIFY, `m` is obtained by popping `msg` from stack and hashing it **once** with SHA256.
+
+This maintains the same relative hash-count semantics as with [the ECDSA versions of OP_CHECKSIG and OP_CHECKDATASIG](op_checkdatasig.md). Although there is an additional SHA256 in step 6 above, it can be considered as being internal to the Schnorr algorithm and it is shared by both opcodes.
+
+## OP_CHECKMULTISIG/VERIFY
+
+Due to complex conflicts with batch verification (see rationale below), OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY are not permitted to accept Schnorr signatures for the time being.
+
+After activation, signatures of the same length as Schnorr (=65 bytes: signature plus hashtype byte) will be disallowed and cause script failure, regardless of the signature contents.
+
+* OP_CHECKDATASIG before upgrade: 64 byte signature is treated as ECDSA.
+* OP_CHECKDATASIG after upgrade: 64 byte signature is treated as *Schnorr*.
+* OP_CHECKSIG before upgrade: 65 byte signature is treated as ECDSA.
+* OP_CHECKSIG after upgrade: 65 byte signature is treated as *Schnorr*.
+* OP_CHECKMULTISIG before upgrade: 65 byte signature is treated as ECDSA.
+* OP_CHECKMULTISIG after upgrade: 65 byte signature *causes script failure*.
+
+Signatures shorter or longer than this exact number will continue to be treated as before. Note that it is very unlikely for a wallet to produce a 65 byte ECDSA signature (see later section "Lack of flag byte...").
+
+# Recommended practices for secure signature generation
+
+Signature generation is not part of the consensus change, however we would like to provide some security guidelines for wallet developers when they opt to implement Schnorr signing.
+
+In brief, creation of a signature starts with the generation of a unique, unpredictable, secret nonce *k* value (0 < *k* < *n*). This produces *R* = *k*'*G* where *k*' = ±*k*, the sign chosen so that the Y coordinate of *R* has Jacobi symbol 1. Its X coordinate, *r*, is now known and in turn *e* is calculable as above. The signature is completed by calculating *s* = *k*' + *ex* mod *n* where *x* is the private key (i.e., *P* = *xG*).
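+
+A sketch of this procedure in Python, reusing `P`, `N`, `G`, `add` and `mul` from the verification sketch above (again, the names are ours and this is illustrative only; real signers must use a hardened implementation and the nonce precautions below):
+
    # Hypothetical signing sketch; x = private key, m = 32-byte message,
    # k = secret nonce with 0 < k < N (see the nonce guidelines below).
    def sign(x, m, k):
        R = mul(k, G)
        if pow(R[1], (P - 1) // 2, P) != 1:     # Jacobi(Y) must be 1: use k' = -k
            k = N - k
        r = R[0]
        pub = mul(x, G)
        BP = (b'\x02' if pub[1] % 2 == 0 else b'\x03') + pub[0].to_bytes(32, 'big')
        e = int.from_bytes(hashlib.sha256(r.to_bytes(32, 'big') + BP + m).digest(),
                           'big') % N
        return r, (k + e * x) % N               # s = k' + e*x mod n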
+
+As in ECDSA, there are security concerns arising in nonce generation. Improper nonce generation can in many cases lead to compromise of the private key *x*. A fully random *k* is secure, but unfortunately in many cases a cryptographically secure random number generator (CSRNG) is not available or not fully trusted/auditable.
+
+A deterministic *k* (pseudorandomly derived from *x* and `m`) may be generated using an algorithm like [RFC6979](https://tools.ietf.org/html/rfc6979)(*modified*) or the algorithm suggested in Pieter Wuille's specification. However:
+
+* Signers MUST NOT use straight RFC6979, since this is already used in many wallets doing ECDSA.
+ * Suppose the same unsigned transaction were accidentally passed to both ECDSA and Schnorr wallets holding same key, which in turn were to generate the same RFC6979 *k*. This would be obvious (same *r* values) and in turn allow recovery of the private key from the distinct Schnorr *s* and ECDSA *s*' values: *x* = (±*ss*'-*z*)/(*r*±*s*'*e*) mod *n*.
+ * We suggest using the RFC6979 sec 3.6 'additional data' mechanism, by appending the 16-byte ASCII string "Schnorr+SHA256␣␣" (here ␣ represents 0x20 -- ASCII space). The popular library libsecp256k1 supports passing a parameter `algo16` to `nonce_function_rfc6979` for this purpose.
+* When making aggregate signatures, in contrast, implementations MUST NOT naively use deterministic *k* generation approaches, as this creates a vulnerability to nonce-reuse attacks from signing counterparties (see [MuSig paper section 3.2](https://eprint.iacr.org/2018/068)).
+
+Hardware wallets SHOULD use deterministic nonces due to the lack of a CSRNG and also for auditability reasons (to prove that kleptographic key leakage firmware is not installed). Software implementations are also recommended to use deterministic nonces even when a CSRNG is available, as deterministic nonces can be unit tested.
+
+# Rationale and commentary on design decisions
+
+## Schnorr variant
+
+Using the secp256k1 curve means that bitcoin's ECDSA keypairs (P,x) can be re-used as Schnorr keypairs. This has advantages in reducing the codebase, but also allows the opcode overloading approach described above.
+
+This Schnorr variant has two advantages inherited from the EdDSA Schnorr algorithms:
+
+* (R,s) signatures allow batch verification.
+* Pubkey prefixing (in the hash) stops some related-key attacks. This is particularly relevant in situations when additively-derived keys (like in unhardened BIP32) are used in combination with OP_CHECKDATASIG (or with a possible future SIGHASH_NOINPUT).
+
+The mechanism of Y coordinate stripping and Jacobi symbol symmetry breaking originates from Pieter Wuille and Greg Maxwell:
+
+* It is important for batch verification that each *r* quickly maps to the intended *R*. It turns out that a natural choice presents itself during 'decompression' of X coordinate *r*: the default decompressed Y coordinate, *y* = (*r*<sup>3</sup> + 7)<sup>(*p*+1)/4</sup> mod *p* appears, which is a quadratic residue and has Jacobi symbol 1. (The alternative Y coordinate, -*y*, is always a quadratic nonresidue and has Jacobi symbol -1.)
+* During single signature verification, Jacobian coordinates are typically used for curve operations. In this case it is easier to calculate the Jacobi symbol of the Y coordinate of *R*', than to perform an affine conversion to get its parity or sign.
+* As a result this ends up slightly *more* efficient, both in bit size and CPU time, than if the parity or sign of Y were retained in the signature.
+
+## Overloading of opcodes
+
+We have chosen to *overload* the OP_CHECKSIG opcode since this means that a "Schnorr P2PKH address" looks just like a regular P2PKH address.
+
+Using a new opcode would also forgo the advantages of keypair re-use, described below:
+
+## Re-use of keypair encodings
+
+An alternative overloading approach might have been to allocate a different public key prefix byte (0x0a, 0x0b) for Schnorr public keys, that distinguishes them from ECDSA public keys (prefixes 2,3,4,6,7). This would at least allow Schnorr addresses to appear like normal P2PKH addresses.
+
+The advantage of re-using the same encoding (and potentially same keypairs) is that it makes Schnorr signatures into a 'drop-in-place' alternative to ECDSA:
+
+* Existing wallet software can trivially switch to Schnorr signatures at their leisure, without even requiring users to generate new wallets.
+* Does not create more confusion with restoration of wallet seeds / derivation paths ("was it an ECDSA or Schnorr wallet?").
+* No new "Schnorr WIF private key" version is required.
+* No new xpub / xprv versions are required.
+* Protocols like BIP47 payment codes and stealth addresses continue to work unchanged.
+* No security-weakening interactions exist between the ECDSA and Schnorr schemes, so key-reuse is not a concern.
+* It may be possible eventually to remove ECDSA support (and thereby allow fully batched verification), without blocking any old coins.
+
+There is a theoretical disadvantage in re-using keypairs. In the case of a severe break in the ECDSA or Schnorr algorithm, all addresses may be vulnerable whether intended solely for Schnorr or ECDSA --- "the security of signing becomes as weak as the weakest algorithm".<sup>[ref](https://lists.bitcoinunlimited.info/pipermail/bch-dev/2018-December/000002.html)</sup>
+
+For privacy reasons, it may be beneficial for wallet developers to coordinate a 'Schnorr activation day' where all wallets simultaneously switch to produce Schnorr signatures by default.
+
+## Non-inclusion of OP_CHECKMULTISIG
+
+The design of OP_CHECKMULTISIG is strange, in that it requires checking a given signature against possibly multiple public keys in order to find a possible match. This approach unfortunately conflicts with batch verification, where it is necessary to know ahead of time which signature is supposed to match with which public key.
+
+Going forward we would like to permanently support OP_CHECKMULTISIG, including Schnorr signature support, but in a modified form that is compatible with batch verification. There are simple ways to do this; however, the options are still being weighed, and there is insufficient time to bring the new approach to fruition in time for the May 2019 upgrade.
+
+In this upgrade we have chosen to take a 'wait and see' approach, by simply forbidding Schnorr signatures (and Schnorr-size signatures) in OP_CHECKMULTISIG for the time being. Schnorr multisignatures will still be possible through aggregation, but they are not a complete drop-in replacement for OP_CHECKMULTISIG.
+
+## Lack of flag byte -- ECDSA / Schnorr ambiguity
+
+In a previous version of this proposal, a flag byte (distinct from ECDSA's 0x30) was prepended for Schnorr signatures. There are some slight disadvantages in not using such a distinguishing byte:
+
+* After the upgrade, if a user generates a 65-byte ECDSA signature (64-byte in CHECKDATASIG), then this will be interpreted as a Schnorr signature and thus unexpectedly render the transaction invalid.
+* A flag byte could be useful if yet another signature protocol were to be added, to help distinguish a third type of signature.
+
+However, these considerations were deemed to be of low significance:
+
+* The probability of a user accidentally generating such a signature is 2<sup>-49</sup>, or 1 in a quadrillion (10<sup>15</sup>). It is thus unlikely that such an accident will occur to *any* user. Even if it happens, that individual can easily move on with a new signature.
+* A flag byte distinction would only be relevant if a new protocol were to also use the secp256k1 curve. The next signature algorithm added to bitcoin will undoubtedly be something of a higher security level, in which case the *public key* would be distinguished, not the signature.
+* Omitting the flag byte does save 1 byte per signature. This can be compared to the overall per-input byte size of P2PKH spending, which is currently ~147.5 for ECDSA signatures, and will be 141 bytes for Schnorr signatures as specified here.
+
+Without a flag byte, however, implementors must take additional care in how signature byte blobs are treated. In particular, a malicious actor creating a short valid 64/65-byte ECDSA signature before the upgrade must not cause the creation of a cache entry wherein the same signature data would be incorrectly remembered as valid Schnorr signature, after the upgrade.
+
+## Miscellaneous
+
+* Applications that copy OP_CHECKSIG signatures into OP_CHECKDATASIG (such as zero-conf forfeits and self-inspecting transactions/covenants) will be unaffected as the semantics are identical, in terms of hash byte placement and number of hashes involved.
+* As with ECDSA, the flexibility in nonce *k* means that Schnorr signatures are not *unique* signatures and are a source of first-party malleability. Curiously, however, aggregate signatures cannot be "second-party" malleated; producing a distinct signature requires the entire signing process to be restarted, with the involvement of all parties.
+
+# Implementation / unit tests
+
+The Bitcoin ABC implementation involved a number of Diffs: https://reviews.bitcoinabc.org/T527
+
+Pieter Wuille's specification comes with a handy set of test vectors for checking cryptographic corner cases: https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr/test-vectors.csv
+
+# Acknowledgements
+
+Thanks to Amaury Séchet, Shammah Chancellor, Antony Zegers, Tomas van der Wansem, Greg Maxwell for helpful discussions.
diff --git a/doc/standards/2019-05-15-segwit-recovery.md b/doc/standards/2019-05-15-segwit-recovery.md
new file mode 100644
index 000000000..22c6a2b76
--- /dev/null
+++ b/doc/standards/2019-05-15-segwit-recovery.md
@@ -0,0 +1,112 @@
+---
+layout: specification
+title: 2019-MAY-15 Segwit Recovery Specification
+date: 2019-05-13
+category: spec
+activation: 1557921600
+version: 0.4
+---
+
+Segwit Recovery Specification
+===============================================
+
+## Motivation
+Prior to the [November 2018 upgrade](2018-nov-upgrade.md), miners were able to recover coins accidentally sent to segwit pay-to-script-hash [(P2SH)](https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki) addresses. These P2SH addresses have a two-push redeem script that contains no signature checks, and they were thus spendable by any miner (though not spendable by normal users due to relay rules). In practice, such coins were sometimes recovered by the intended recipient with the help of miners, and sometimes recovered by anonymous miners who simply decided to assert ownership of these anyone-can-spend coins.
+
+In November 2018, the CLEANSTACK consensus rule was activated, with the intent of reducing malleability mechanisms. This had the unfortunate side effect of also making these segwit scripts *unspendable*, since attempting to spend these coins would always leave two items on the stack.
+
+Starting in May 2019, transactions spending segwit P2SH coins will be allowed once again to be included in blocks.
+
+## Specification
+A transaction input
+1. that spends a P2SH coin (scriptPubKey=`OP_HASH160 <hash160 of the redeem script> OP_EQUAL`); and
+2. where the scriptSig only pushes one item onto the stack: a redeem script that correctly hashes to the value in the scriptPubKey; and
+3. where the redeem script is a witness program;
+
+shall be considered valid under the consensus rules to be activated in May 2019.
+
+A witness program has a 1-byte push opcode (for a number between 0 and 16, inclusive) followed by a data push between 2 and 40 bytes (inclusive), both in minimal form.
+Equivalently, a witness program can be identified by examining the length and the first two bytes of the redeem script:
+* The redeem script byte-length is at least 4 and at most 42.
+* The first byte is 0x00, or in the range 0x51 – 0x60. (OP_0, or OP_1 – OP_16).
+* The second byte is equal to the redeem script byte-length, minus two.
+
+All witness-like scripts will be considered valid, even if their execution would normally result in an invalid transaction (e.g. due to a zero value on the stack). Note that because the witness program contains only push operations (among other restrictions), the P2SH script matching the provided hash is the only meaningful validation criterion. The only consequence of this specification is that an intentionally unspendable script resembling a witness program may now be spendable.
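+
+The length-and-prefix test above translates directly into code; a Python sketch (the function name is ours):
+
    # Hypothetical check that a redeem script is a witness program.
    def is_witness_program(redeem_script: bytes) -> bool:
        if not 4 <= len(redeem_script) <= 42:
            return False
        if redeem_script[0] != 0x00 and not 0x51 <= redeem_script[0] <= 0x60:
            return False                        # version push: OP_0 or OP_1..OP_16
        return redeem_script[1] == len(redeem_script) - 2   # minimal program push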
+
+This exemption should not be applied for the acceptance of transactions from network peers (i.e., only to acceptance of new blocks), so that segwit recovery transactions remain non-standard (and thus require a miner's cooperation to perform).
+
+## Test cases
+
+#### Valid segwit recoveries:
+ V1) Recovering v0 P2SH-P2WPKH:
+ scriptSig: 0x16 0x001491b24bf9f5288532960ac687abb035127b1d28a5
+ scriptPubKey: OP_HASH160 0x14 0x17743beb429c55c942d2ec703b98c4d57c2df5c6 OP_EQUAL
+
+ V2) Recovering v0 P2SH-P2WSH:
+ scriptSig: 0x22 0x00205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+ scriptPubKey: OP_HASH160 0x14 0x17a6be2f8fe8e94f033e53d17beefda0f3ac4409 OP_EQUAL
+
+ V3) Max allowed version, v16:
+ scriptSig: 0x22 0x60205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+ scriptPubKey: OP_HASH160 0x14 0x9b0c7017004d3818b7c833ddb3cb5547a22034d0 OP_EQUAL
+
+ V4) Max allowed length, 42 bytes:
+ scriptSig: 0x2a 0x00285a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f2021222324252627
+ scriptPubKey: OP_HASH160 0x14 0xdf7b93f88e83471b479fb219ae90e5b633d6b750 OP_EQUAL
+
+ V5) Min allowed length, 4 bytes:
+ scriptSig: 0x04 0x00025a01
+ scriptPubKey: OP_HASH160 0x14 0x86123d8e050333a605e434ecf73128d83815b36f OP_EQUAL
+
+ V6) Valid in spite of a false boolean value being left on stack, 0:
+ scriptSig: 0x04 0x00020000
+ scriptPubKey: OP_HASH160 0x14 0x0e01bcfe7c6f3fd2fd8f81092299369744684733 OP_EQUAL
+
+ V7) Valid in spite of a false boolean value being left on stack, minus 0:
+ scriptSig: 0x04 0x00020080
+ scriptPubKey: OP_HASH160 0x14 0x10ddc638cb26615f867dad80efacced9e73766bc OP_EQUAL
+
+#### Invalid segwit recoveries:
+ I1) Non-P2SH output:
+ scriptSig: 0x16 0x001491b24bf9f5288532960ac687abb035127b1d28a5
+ scriptPubKey: OP_TRUE
+
+ I2) Redeem script hash does not match P2SH output:
+ scriptSig: 0x16 0x001491b24bf9f5288532960ac687abb035127b1d28a5
+ scriptPubKey: OP_HASH160 0x14 0x17a6be2f8fe8e94f033e53d17beefda0f3ac4409 OP_EQUAL
+
+ I3) scriptSig pushes two items onto the stack:
+ scriptSig: OP_0 0x16 0x001491b24bf9f5288532960ac687abb035127b1d28a5
+ scriptPubKey: OP_HASH160 0x14 0x17743beb429c55c942d2ec703b98c4d57c2df5c6 OP_EQUAL
+
+ I4) Invalid witness program, non-minimal push in version field:
+ scriptSig: 0x17 0x01001491b24bf9f5288532960ac687abb035127b1d28a5
+ scriptPubKey: OP_HASH160 0x14 0x0718743e67c1ef4911e0421f206c5ff81755718e OP_EQUAL
+
+ I5) Invalid witness program, non-minimal push in program field:
+ scriptSig: 0x05 0x004c0245aa
+ scriptPubKey: OP_HASH160 0x14 0xd3ec673296c7fd7e1a9e53bfc36f414de303e905 OP_EQUAL
+
+ I6) Invalid witness program, too short, 3 bytes:
+ scriptSig: 0x03 0x00015a
+ scriptPubKey: OP_HASH160 0x14 0x40b6941895022d458de8f4bbfe27f3aaa4fb9a74 OP_EQUAL
+
+ I7) Invalid witness program, too long, 43 bytes:
+ scriptSig: 0x2b 0x00295a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728
+ scriptPubKey: OP_HASH160 0x14 0x13aa4fcfd630508e0794dca320cac172c5790aea OP_EQUAL
+
+ I8) Invalid witness program, version -1:
+ scriptSig: 0x22 0x4f205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+ scriptPubKey: OP_HASH160 0x14 0x97aa1e96e49ca6d744d7344f649dd9f94bcc35eb OP_EQUAL
+
+ I9) Invalid witness program, version 17:
+ scriptSig: 0x23 0x0111205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+ scriptPubKey: OP_HASH160 0x14 0x4b5321beb1c09f593ff3c02be4af21c7f949e101 OP_EQUAL
+
+ I10) Invalid witness program, OP_RESERVED in version field:
+ scriptSig: 0x22 0x50205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+ scriptPubKey: OP_HASH160 0x14 0xbe02794ceede051da41b420e88a86fff2802af06 OP_EQUAL
+
+ I11) Invalid witness program, more than 2 stack items:
+ scriptSig: 0x23 0x00205a0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f51
+ scriptPubKey: OP_HASH160 0x14 0x8eb812176c9e71732584123dd06d3246e659b199 OP_EQUAL
diff --git a/doc/standards/2019-05-15-upgrade.md b/doc/standards/2019-05-15-upgrade.md
new file mode 100644
index 000000000..6e42e5b20
--- /dev/null
+++ b/doc/standards/2019-05-15-upgrade.md
@@ -0,0 +1,47 @@
+---
+layout: specification
+title: 2019-MAY-15 Network Upgrade Specification
+date: 2019-02-28
+category: spec
+activation: 1557921600
+version: 0.5
+---
+
+## Summary
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1557921600, Bitcoin Cash will execute an upgrade of the network consensus rules according to this specification. Starting from the next block, these consensus rule changes will take effect:
+
+* Enable Schnorr signatures.
+* Allow Segwit recovery.
+
+The following are not consensus changes, but are recommended changes for Bitcoin Cash implementations:
+
+* Automatic replay protection for future upgrade
+
+## Enable Schnorr signatures
+
+Support Schnorr signatures in CHECKSIG and CHECKDATASIG per [2019-05-15-schnorr.md](2019-05-15-schnorr.md).
+
+## Allow Segwit recovery
+
+In the last upgrade, coins accidentally sent to Segwit P2SH addresses were made unspendable by the CLEANSTACK rule. This upgrade will make an exemption for these coins and return them to the previous situation, where they are spendable. This means that once the P2SH redeem script pre-image is revealed (for example by spending coins from the corresponding BTC address), any miner can take the coins.
+
+Details: [2019-05-15-segwit-recovery.md](2019-05-15-segwit-recovery.md)
+
+## Automatic Replay Protection
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is less than UNIX timestamp 1573819200 (Nov 2019 upgrade) Bitcoin Cash full nodes MUST enforce the following rule:
+
+ * `forkid` [2] to be equal to 0.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1573819200 (Nov 2019 upgrade) Bitcoin Cash full nodes implementing the May 2019 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid` [2] to be equal to 0xFF0002. ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software. Wallets that follow the upgrade should not have to change anything.
+
+## References
+
+[1] Median Time Past is described in [bitcoin.it wiki](https://en.bitcoin.it/wiki/Block_timestamp). It is guaranteed by consensus rules to be monotonically increasing.
+
+[2] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/2019-11-15-minimaldata.md b/doc/standards/2019-11-15-minimaldata.md
new file mode 100644
index 000000000..a3366244c
--- /dev/null
+++ b/doc/standards/2019-11-15-minimaldata.md
@@ -0,0 +1,133 @@
+---
+layout: specification
+title: 2019-NOV-15 minimal push and minimal number encoding rules
+date: 2019-08-11
+category: spec
+activation: 1573819200
+version: 1.0
+author: Mark B. Lundeberg
+---
+
+# Summary
+
+In the November 2019 upgrade, two new consensus rules are introduced to Bitcoin Cash:
+- during script execution, executed push opcodes are restricted to be the minimal form for the resultant stack element.
+- during script execution, the decoding of stack elements as numbers is restricted to only allow minimal forms, in most cases.
+
+# Motivation
+
+Third-party malleation is when anyone, such as an uninvolved miner, is able to modify parts of a transaction while keeping it valid, yet changing the transaction identifier. The validity of child transactions is contingent on having the correct transaction identifier for the parent, and so third-party malleability threatens to invalidate chains of transactions, whether they are held in secret, in mempool, or even already confirmed (i.e., during blockchain reorganization). A variety of past consensus rule changes have tried to address third-party malleability vectors: BIP66 strict ECDSA encoding, the ECDSA low-S rule, the strict encoding rule for hashtype, the scriptSig push-only rule, and the cleanstack rule. This effort is incomplete, as there remains a significant malleability vector that means that currently, *all* transactions on BCH are still third-party malleable:
+
+* The push opcodes used during scriptSig execution can be modified. For example, the length-one stack element `{0x81}` can be equivalently pushed using any of the following five script phrases (in hex): `4f`, `0181`, `4c0181`, `4d010081`, `4e0100000081`. A third party can substitute any of these for each other.
+
+For some transactions, an additional malleability mechanism is also present:
+
+* Some smart contracts perform operations on numbers that are taken from the scriptSig, and numbers in bitcoin's Script language are allowed to have multiple representations on stack. The number -1, for example, can be represented by `{0x81}`, `{0x01, 0x80}`, `{0x01, 0x00, 0x80}`, `{0x01, 0x00, 0x00, 0x80}`.
+
+For years now, the "MINIMALDATA" flag, which restricts both of the aforementioned malleability vectors, has been active at the mempool layer of most nodes but not at the consensus layer. The upgrade converts the existing MINIMALDATA rules to consensus. For reference, this document contains a full specification of these rules.
+
+It is of course impossible to completely remove third-party malleability in bitcoin (not even using techniques like SegWit) since a transaction can be made that involves no signature or where the signing key is not a secret, or where permutations are permitted (e.g., [SINGLE|ANYONECANPAY](https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki#specification)). We can, however, remove it for large classes of transactions, and this has been the goal of the past upgrades. Bringing MINIMALDATA to the consensus layer, along with the [dummy element restrictions in the OP_CHECKMULTISIG upgrade](2019-11-15-schnorrmultisig.md), finally achieves the goal of removing third-party malleability from the vast majority of transactions performed on BCH.
+
+# Technical background
+
+**Push opcodes** — Bitcoin's Script system is a stack-based language. The stack elements are simply byte arrays of length 0 to 520. Push opcodes append a byte array onto the stack, and there are a variety of different opcodes for pushing arbitrary data of various lengths, or pushing specific one-byte arrays:
+* Opcode 0 (OP_0) pushes an empty element onto the stack.
+* Opcodes 1 through to 75 push an arbitrary byte array of the corresponding length onto the stack.
+* Opcode 76 (PUSHDATA1) takes a one-byte length as parameter, and then pushes an arbitrary byte-array of that length.
+* Opcode 77 (PUSHDATA2) takes a two-byte length as parameter, and then pushes an arbitrary byte-array of that length.
+* Opcode 78 (PUSHDATA4) takes a four-byte length as parameter, and then pushes an arbitrary byte-array of that length.
+* Opcode 79 (OP_1NEGATE) pushes the one-byte element `{0x81}`.
+* Opcode 81 (OP_1) pushes the one-byte element `{0x01}`.
+* Opcode 82 (OP_2) pushes the one-byte element `{0x02}`.
+* ...
+* Opcode 95 (OP_15) pushes the one-byte element `{0x0f}`.
+* Opcode 96 (OP_16) pushes the one-byte element `{0x10}`.
+
+It can be seen from the above list that any given byte array can be pushed in a variety of ways. However, for any given byte array there is a unique shortest possible way to push the byte array.
+
+**Number representation** — Although bitcoin's stack is just a sequence of byte arrays, there are numerous Script opcodes that expect to take integers from the stack, which means they decode the byte array to an integer before logically using the integer. The way Script represents numbers as byte arrays is using a variable-length, little-endian [sign-and-magnitude representation](https://en.wikipedia.org/wiki/Signed_number_representations#Signed_magnitude_representation_(SMR)). This is typical for a multiprecision or 'bignum' arithmetic computing environment, but may be unfamiliar for programmers who are used to 'bare-metal' integer computing that uses fixed-width two's complement (or rarely, ones' complement) representation.
+
+Currently, the only consensus restriction is that the byte arrays used during number decoding shall be at most 4 bytes in length (except for four special cases, noted in the specification below). This restricts the range of numbers to be \[-2<sup>31</sup> + 1 ... 2<sup>31</sup> - 1\] (inclusive), but does not pose any further restrictions on encoding. So, there are various ways to encode a given number as a stack element by padding the number with excess groups of zero bits just before the sign bit. For example, the number -6844 can be represented in three valid ways: `{0xbc, 0x9a}`, `{0xbc, 0x1a, 0x80}`, `{0xbc, 0x1a, 0x00, 0x80}`. The number 39612 can be represented as `{0xbc, 0x9a, 0x00}` or `{0xbc, 0x9a, 0x00, 0x00}`. The number 0 has nine valid representations. While all opcodes that output numbers will minimally encode said output, at the current time they are happy to accept any representation for a numeric input.
+
+For any given number, there is exactly one minimal (shortest) representation. A simple test can be applied to a byte array to see whether it is the minimal encoding of the corresponding number:
+
+* The byte array holds a minimally encoded number if any of the following apply:
+ * The byte array has length 0. (this is the minimal representation of the number 0)
+ * The byte array has length of 1 or larger, and the last byte has any bits set besides the high bit (the sign bit).
+ * The byte array has length of 2 or larger, and the *second-to-last* byte has its high bit set.
+* If none of the above apply, the byte array holds a non-minimal encoding of the given number.
+
+Note that bitcoin's number system treats "negative 0" encodings such as `{0x80}`, `{0x00, 0x80}`, etc. as a representation of 0, and the minimal encoding of 0 is an empty byte array: `{}`. The above rules indicate that neither `{0x80}` nor `{0x00}` are minimal encodings.
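+
+In code, the test reads as follows (a Python sketch; the examples reuse the encodings of -6844 and 0 discussed above):
+
    # Hypothetical encoding of the three-point minimality test.
    def is_minimally_encoded_number(b: bytes) -> bool:
        if len(b) == 0:
            return True                    # the number 0
        if b[-1] & 0x7f:
            return True                    # last byte has bits besides the sign bit
        return len(b) >= 2 and bool(b[-2] & 0x80)

    assert is_minimally_encoded_number(b'\xbc\x9a')          # -6844, minimal
    assert not is_minimally_encoded_number(b'\xbc\x1a\x80')  # -6844, padded
    assert not is_minimally_encoded_number(b'\x80')          # "negative 0"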
+
+# Specification
+
+Though conventionally appearing under one flag "MINIMALDATA", there are two unrelated rules that do not interact. The specifications have been accordingly split into two sections.
+
+## Minimal push rule
+
+Upon the execution of a push opcode (be it during scriptSig, scriptPubKey, or P2SH redeemScript execution), the data pushed on stack shall be examined in order to decide if the just-executed push opcode was minimal:
+* An empty stack element `{}` must be pushed using OP_0.
+* A one-byte element must be pushed using opcode 1 followed by the given byte, *except* for the following 17 special cases where a special opcode must be used instead:
+ * `{0x81}` must be pushed using OP_1NEGATE
+ * `{0x01}` must be pushed using OP_1
+ * `{0x02}` must be pushed using OP_2
+ * ...
+ * `{0x0f}` must be pushed using OP_15
+ * `{0x10}` must be pushed using OP_16
+* An element of length N=2 to length N=75 must be pushed using opcode N.
+* An element of length 76 to 255 must be pushed using PUSHDATA1.
+* An element of length 256 to 65535 must be pushed using PUSHDATA2.
+
+In practice, PUSHDATA2 can only push lengths up to 520, but in case script is upgraded one day, the limit for PUSHDATA2 remains at 65535. Since the above rules cover all possible stack element lengths, this means that PUSHDATA4 cannot appear in executed parts of scripts (it must still, however, be *parsed* correctly in an unexecuted branch).
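+
+Equivalently, the unique minimal push for a given element can be constructed as in this Python sketch (names are ours):
+
    # Hypothetical construction of the minimal push for a stack element.
    def minimal_push(data: bytes) -> bytes:
        n = len(data)
        if n == 0:
            return b'\x00'                               # OP_0
        if n == 1 and data[0] == 0x81:
            return b'\x4f'                               # OP_1NEGATE
        if n == 1 and 1 <= data[0] <= 16:
            return bytes([0x50 + data[0]])               # OP_1 .. OP_16
        if n <= 75:
            return bytes([n]) + data                     # direct push, opcode n
        if n <= 255:
            return b'\x4c' + bytes([n]) + data           # PUSHDATA1
        return b'\x4d' + n.to_bytes(2, 'little') + data  # PUSHDATA2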
+
+It is worth emphasizing that the above rules only apply at the moment when push opcodes are actually *executed*, i.e., when data is actually being placed onto the stack. Thus:
+* These rules do *not* apply to push opcodes found in unexecuted branches (those behind OP_IF/OP_NOTIF) of executed scripts.
+* These rules do *not* apply to scripts appearing in transaction outputs, as they have not yet been executed.
+* These rules do *not* apply to coinbase scriptSigs, which are not executed. Note that BIP34 imposes a (slightly distinct) encoding requirement for the mandatory height push at the start of the coinbase scriptSig.
+
+## Minimal number encoding
+
+Most opcodes that take numbers from the stack shall require the stack element to be a minimally encoded representation. To be specific, these operands must be minimally encoded numbers:
+* The single operand of OP_PICK and OP_ROLL.
+* The single operand of OP_1ADD, OP_1SUB, OP_NEGATE, OP_ABS, OP_NOT, OP_0NOTEQUAL.
+* Both operands of OP_ADD, OP_SUB, OP_DIV, OP_MOD, OP_BOOLAND, OP_BOOLOR, OP_NUMEQUAL, OP_NUMEQUALVERIFY, OP_NUMNOTEQUAL, OP_LESSTHAN, OP_GREATERTHAN, OP_LESSTHANOREQUAL, OP_GREATERTHANOREQUAL, OP_MIN, OP_MAX.
+* All three operands of OP_WITHIN.
+* The "keys count" and "signatures count" operands of OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY.
+* The second operand ("position") of OP_SPLIT.
+* The second operand ("size") of OP_NUM2BIN, *but not the first (see below)*.
+* In general, all number-accepting opcodes added in future will require minimal encoding as well.
+
+However, four opcodes are special in the numeric inputs they accept:
+
+* OP_CHECKLOCKTIMEVERIFY and OP_CHECKSEQUENCEVERIFY both take up to **5-byte** numbers from the stack, a deviation from the usual 4-byte limit. Regardless, we shall require that these 5-byte numbers also be minimally encoded.
+* The first operand of OP_NUM2BIN and the single operand of OP_BIN2NUM will continue to have *no minimal encoding restrictions* and *no length restrictions* (see [their specification](may-2018-reenabled-opcodes.md) for more information).
+
+The following opcodes notably do not appear in the above lists since they do *not* decode their inputs as numbers, and thus they have no minimal number encoding rules: OP_IF, OP_NOTIF, OP_VERIFY, OP_IFDUP, OP_AND, OP_OR, OP_XOR.
+
+# Rationale and commentary on design decisions
+
+## Over-restrictions on minimal push
+
+To prevent push malleability, it is only necessary to restrict the scriptSig. The push forms used during scriptPubKey and P2SH redeemScript execution cannot be malleated, since they are committed by hashing into the prior transaction's identifier. Thus it may seem like 'overkill' to restrict these as well.
+
+Despite this, the MINIMALDATA standardness rule has applied these restrictions to scriptPubKey and redeemScript for quite a while now, and it does not appear to be causing an issue. In addition, it is technically cleaner in some ways, if the same script interpretation rules can be applied to all executing scripts.
+
+## Restrictions of number encoding
+
+By far, the most common usage of numbers is in OP_CHECKMULTISIG where they are provided in the locking script and cannot be malleated. Only rare smart contracts take numbers from the scriptSig, and in fact, smart contracts that require minimal number encoding could easily enforce this themselves, by using tricks such as `OP_DUP OP_DUP OP_0 OP_ADD OP_EQUALVERIFY` (taking advantage of the fact that adding 0 to a number returns its minimal encoding), or more recently: `OP_DUP OP_DUP OP_BIN2NUM OP_EQUALVERIFY`.
+
+However, the number encoding rule has been standard for quite some time, and adopting it now should cause no issue. It also makes it so that smart contract authors can save their limited opcodes for more valuable tasks, and need not use such tricks.
+
+## Not restricting boolean encodings
+
+Four opcodes interpret their input as a boolean without any restriction: OP_IF, OP_NOTIF, OP_VERIFY, OP_IFDUP. Any byte array of any length that is all zeros, or that is all zeros besides a final byte of 0x80, is interpreted as 'false', and any other byte array is interpreted as 'true'. The script interpreter also accepts such unrestricted boolean representations for the final stack value used to determine pass/fail of a script.
+
+Two additional 'boolean' opcodes (OP_BOOLAND, OP_BOOLOR) have a semi-restricted input, as they interpret their inputs as numbers. These must be at most 4 bytes long, and as mentioned above they will be restricted according to the number encoding rules. However, while there will be only one valid representation for 'false' (the number 0, i.e., `{}`), any nonzero number can be used as 'true'.
+
+In theory, we could restrict all of these boolean-expecting operations to accept only `{}` for 'false', and `{0x01}` for 'true'; this would be analogous to the number encoding restrictions. However, no such standardness rule exists at this time so it would be too sudden to impose any hard rule for this upgrade.
+
+Also, it is easier for scripts to avoid malleable boolean inputs without having to use up additional opcodes, as demonstrated by the following example. Among smart contracts, it is common to see a construction of a form like `OP_IF pubkey_A OP_CHECKSIGVERIFY <clause 1 conditions> OP_ELSE pubkey_B OP_CHECKSIGVERIFY <clause 2 conditions> OP_ENDIF`. Transactions spending such smart contracts will remain malleable, since the input to OP_IF comes from scriptSig. However, it is easy for script programmers to tweak such smart contracts to a non-malleable form: `pubkey_A OP_CHECKSIG OP_IF <clause 1 conditions> OP_ELSE pubkey_B OP_CHECKSIGVERIFY <clause 2 conditions> OP_ENDIF`. This takes advantage of the fact that OP_CHECKSIG simply returns false if the provided signature is not valid. Due to the already-adopted NULLFAIL rule, `{}` is the only permitted invalid signature, and cannot be malleated.
+
+# Acknowledgements
+
+Thanks to Antony Zegers and Amaury Sechet for valuable feedback.
diff --git a/doc/standards/2019-11-15-schnorrmultisig.md b/doc/standards/2019-11-15-schnorrmultisig.md
new file mode 100644
index 000000000..c974f6a28
--- /dev/null
+++ b/doc/standards/2019-11-15-schnorrmultisig.md
@@ -0,0 +1,203 @@
+---
+layout: specification
+title: 2019-NOV-15 Schnorr OP_CHECKMULTISIG specification
+date: 2019-08-11
+category: spec
+activation: 1573819200
+version: 1.0
+author: Mark B. Lundeberg
+---
+
+# Summary
+
+OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY will be upgraded to accept Schnorr signatures in a way that increases verification efficiency and is compatible with batch verification.
+
+*note: this document assumes knowledge of [the prior Schnorr signature upgrade](2019-05-15-schnorr.md).*
+
+# Motivation
+
+In [the last upgrade](2019-05-15-upgrade.md), we added Schnorr support to OP_CHECKSIG and OP_CHECKDATASIG, but not OP_CHECKMULTISIG.
+
+Although we could have added support to OP_CHECKMULTISIG as well (which would have been overall simpler), this would conflict with the desire to do batch verification in the future: currently, OP_CHECKMULTISIG validation needs to check a signature against multiple public keys in order to find a possible match, whereas Schnorr batch verification requires knowing ahead of time which signatures are supposed to match with which public keys. Without a clear path forward on how to resolve this, we postponed the issue and simply prevented Schnorr signatures from being used in OP_CHECKMULTISIG.
+
+Schnorr aggregated signatures (with OP_CHECKSIG) are one way to do multisignatures, but they have different technical properties than the familiar Bitcoin multisig, and thus are far from being a drop-in replacement for it. Besides that, it is also desirable that any existing coin can be spent using Schnorr signatures, and there are numerous OP_CHECKMULTISIG-based wallets and coins in existence that we want to be able to take advantage of Schnorr signatures.
+
+# Specification
+
+OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY will be upgraded to allow *two* execution modes, based on the value of the dummy element.
+
+Mode 1 (legacy ECDSA support, M-of-N; consumes N+M+3 items from stack):
+
+ <dummy> <sig0> ... <sigM> M <pub0> ... <pubN> N OP_CHECKMULTISIG
+
+The precise validation mechanics of this are complex and full of corner cases; the source code is the best reference. Most notably, for 2-of-3 (M=2, N=3), `sig0` may be a valid ECDSA transaction signature from `pub0` or from `pub1`; `sig1` may be from `pub1` (if `sig0` is from `pub0`) or `pub2`. Historical transactions (prior to the FORKID, STRICTENC and NULLFAIL rules) had even more freedoms and [weirdness](https://decred.org/research/todd2014.pdf). Upon activation, the `dummy` element must be null, i.e., an empty byte array.
+
+Mode 2 (new Schnorr support, M-of-N; consumes N+M+3 items from stack):
+
+ <checkbits> <sig0> ... <sigM> M <pub0> ... <pubN> N OP_CHECKMULTISIG
+
+* The `dummy` element has now been repurposed as a bitfield that we call `checkbits`, and indicates which public keys should have a signature checked against them.
+* This mode activates when `dummy` (`checkbits`) is non-null, i.e., not an empty byte array.
+* Crucially, each of the signature checks requested by `checkbits` *must* be valid, or else the script fails.
+* In mode 2, ECDSA signatures are not allowed.
+
+## Triggering and execution mechanism
+
+Whether to execute in mode 1 or mode 2 is determined by the size of the dummy / checkbits element.
+* If the checkbits element is NULL (length 0), then Mode 1 is executed.
+* If the checkbits element is non-NULL (length > 0), then Mode 2 is executed.
+
+The new mode operates similarly to the legacy mode but only checks signatures as requested, according to the `checkbits` field. If the least significant bit of `checkbits` is set, then the bottom (first-pushed) signature should be checked against the bottom public key, and so on. For a successful verification in the new mode, `checkbits` must have exactly `M` bits set, and the signatures must be correctly ordered. On stack, `checkbits` is encoded as a byte array of length `floor((N + 7)/8)`, i.e., the shortest byte array that can hold `N` bits. It is encoded in little-endian order, i.e., the least significant bit occurs in the first byte.
+
+In pseudocode, the full OP_CHECKMULTISIG code is:
+
+ Get N (number of pubkeys) from stack ; check bounds 0 <= N <= 20.
+ Add N to nOpCount; if nOpCount exceeds 201 limit, fail script.
+ Get M (number of signatures) from stack ; check bounds 0 <= M <= N.
+ Calculate scriptCode.
+ If activated, and the dummy element is not null, then:
+ # New mode (2)
+ Set a cursor on the bottom signature (first signature pushed on stack).
+ Set another cursor on the bottom public key (first key pushed on stack).
+ Fail if the dummy element does not have length in bytes = floor((N+7)/8)
+ Set checkbits := 0, then iterate over the bytes in the dummy element in reverse order:
+ For each byte X, checkbits := (checkbits << 8) | X
+ Loop while the signature and key cursors are not depleted:
+ If the least significant bit of checkbits is 1, then:
+ Check public key encoding.
+ Check signature encoding; exclude non-Schnorr signatures.
+ Validate the current signature against the current public key; if invalid, fail script.
+ Move the signature cursor up by one position.
+ Bitshift checkbits down by one bit. (checkbits := checkbits >> 1)
+ Move the public key cursor up by one position.
+ If the final checkbits value is nonzero, fail script.
+ If the signature cursor has not been depleted, fail script.
+ Else:
+ # Legacy mode (1)
+ Set a cursor on the top signature (last signature pushed on stack).
+ Set another cursor on the top public key (last key pushed on stack).
+ If pre-BCH-fork, then run findAndDelete on scriptCode.
+ Loop while the signature cursor is not depleted:
+ Check public key encoding.
+ Check signature encoding; exclude Schnorr signatures (64+1 bytes).
+ Validate the current signature against the current public key.
+ If valid, then move signature cursor deeper by one position.
+ Move the public key cursor deeper by one position.
+ If more signatures remain than public keys, set success=False and abort loop early.
+ If loop was not aborted, set success=True.
+ [non-consensus] Check NULLDUMMY rule.
+
+ If success is False, then ensure all signatures were null. (NULLFAIL rule)
+ Clean up the used stack items.
+
+ Push success onto stack
+ If opcode is OP_CHECKMULTISIGVERIFY:
+ Pop success from stack
+ If not success:
+ FAIL
+
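+To illustrate the new-mode flow above, here is a minimal, non-normative Python sketch (encoding checks are omitted, and `verify_schnorr` is a hypothetical callback, not part of any implementation):
+
+```python
+def checkmultisig_new_mode(dummy: bytes, sigs: list, pubkeys: list,
+                           verify_schnorr) -> bool:
+    """Sketch of mode 2; fails unless every requested check succeeds."""
+    n, m = len(pubkeys), len(sigs)
+    if len(dummy) != (n + 7) // 8:
+        return False  # checkbits must be exactly floor((N+7)/8) bytes
+    checkbits = int.from_bytes(dummy, 'little')
+    sig_cursor = 0
+    for pubkey in pubkeys:  # bottom (first-pushed) key first
+        if checkbits & 1:
+            if sig_cursor >= m or not verify_schnorr(sigs[sig_cursor], pubkey):
+                return False  # requested check failed, or ran out of signatures
+            sig_cursor += 1
+        checkbits >>= 1
+    # every set bit must lie in the lower N bits, and all M signatures must be used
+    return checkbits == 0 and sig_cursor == m
+```
+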
+## Notes
+
+The mechanics of CHECKMULTISIG are complicated due to the order of signature checking, the timing of when key/signature encodings are checked, the ability to either hard-fail (fail script & invalidate transaction) or soft-fail (return False on stack), and the interaction with previously activated consensus rules.
+Some features of the specification are worth emphasizing:
+
+- The legacy mode has unaltered functionality, except being restricted to only use a null dummy element.
+- Compatibility is good, as basically any reasonable smart contract using OP_CHECKMULTISIG can be spent using either legacy or new mode. (Of course, with effort a script could be deliberately crafted to only allow one mode.)
+- In both modes, public keys only have their encoding checked just prior to performing a signature check. The unchecked public keys may be arbitrary data.
+ - In legacy mode, the precise order of checking is critical to obtaining a correct implementation, due to the public key encoding rule. Signature and pubkey iteration always starts at the top public key and signature (the last pushed on stack).
+ - Some multisig scripts were made unspendable on Aug 1 2017, due to the last-pushed public key having incorrect encoding. These will now be spendable, but only in the new mode.
+- Note that the numbers `N`, `M` will require minimal encoding, upon activation of the minimal number encoding rule (see https://github.com/bitcoincashorg/bitcoincash.org/pull/376/files).
+- In the new mode, `checkbits` must have exactly `M` of the lower `N` bits set, and all other bits must be clear:
+ - Only the least significant N bits may be set in `checkbits`, i.e., if `checkbits` taken as an integer exceeds 2<sup>N</sup>-1 then the script will fail.
+ - If `checkbits` has more than `M` bits set, the script will fail.
+ - If `checkbits` is nonzero but has fewer than `M` bits set, then the script will fail because too few signature verifications were performed.
+- In normal circumstances the new mode cannot be third-party malleated, since the new mode design means that `checkbits` should have only one valid value for a given set of signatures.
+ - Third-party malleation can still occur in some very unusual cases. For example, if some public key points are repeated in the list of keys, then signatures can be reordered and/or the `checkbits` can be adjusted. Also, if `M=0` then two possible values of the dummy element are permitted.
+ - Likewise the design stops the malleation vector of the legacy mode, since the dummy element now must be null for it to execute. The non-consensus NULLDUMMY rule will thus be made redundant, after this rule activates.
+- The legacy mode can require up to N signature checks in order to complete. In the new mode, exactly M signature checks occur for a successful operation.
+- A soft-failing CHECKMULTISIG (that returns False on stack) can only occur with all null signatures, due to NULLFAIL. For simplicity and avoiding malleability, the new mode does not allow a failing case, and a soft-failing CHECKMULTISIG must execute in the legacy mode (which will require a NULL dummy element). Note that even such a soft-failing checkmultisig still requires the top public key to be correctly encoded due to the legacy mechanics.
+- For M=0, the opcode returns True without checking any key encodings. This is true in both new and legacy mode.
+
+And, some clarifications:
+- As usual, checking public key encoding means permitting only 65-long byte arrays starting with 0x04, or 33-long byte arrays starting with 0x02 or 0x03.
+- As usual, checking signature encoding for either ECDSA or Schnorr involves permitting only recognized hashtype bytes; Schnorr signatures must have a given length, while ECDSA signatures must follow DER encoding and Low-S rules, and must not have the length allocated to Schnorr signatures. Null signatures (empty stack elements) are also treated as 'correctly encoded'.
+- The findAndDelete operation only applies to old transactions prior to August 2017, and does not impact current transactions, not even in legacy mode.
+
+
+# Wallet implementation guidelines
+
+(Currently, the common multisig wallet uses P2SH-multisig, i.e., a redeemScript of the form `M <pub0> ... <pubN> N OP_CHECKMULTISIG`. We'll focus on this use case and assume M > 0.)
+
+In the new Schnorr mode, *all* signatures must be Schnorr; no mixing with ECDSA is supported. Multisig wallets that wish to use the new Schnorr signatures will need to update their co-signing pool infrastructure to support a new type of signing. If some parties are unable to generate a Schnorr signature, then it will not be possible to generate a successful transaction except by restarting to make an ECDSA multisig. This creates problems in particular when some of the parties are hardware wallets, which may only support ECDSA for the foreseeable future.
+
+We suggest the following for wallet software producers that wish to make Schnorr multisig spends while remaining backwards compatible:
+
+* Add an optional marker to the initial setup process, such as appending `?schnorr=true` to the `xpub`.
+* Add a new kind of non-backwards-compatible multisignature request that indicates schnorr signatures are needed.
+* If it is not known that all parties can accept Schnorr requests, then only generate ECDSA multisignature requests.
+* Have the ability to participate in either ECDSA or Schnorr multisignatures, as requested.
+
+It may also be helpful to include *both* an ECDSA and a Schnorr signature in the partially signed transaction format, so that if one cosigner is unable to sign Schnorr, then an ECDSA fallback is possible without needing a retry. This introduces no additional malleability concerns, since any of the cosigners is already able to malleate their own signature.
+
+## Calculating and pushing checkbits
+
+In order to complete a multisignature, whether in the new mode or legacy mode, wallets need to keep track of which signatures go with which public keys. In the new mode, wallets must not just correctly order the signatures, but must also correctly include the `checkbits` parameter.
+
+Once the `checkbits` parameter is determined, it needs to be encoded to bytes, and then minimally pushed in the scriptSig. While the encoding to bytes is straightforward, it is worth emphasizing that certain length-1 byte vectors must be pushed using special opcodes (a sketch follows the list below).
+
+* For N <= 8, a length-1 byte array is to be pushed.
+ * The byte arrays `{0x01}` through `{0x10}` must be pushed using OP_1 through OP_16, respectively.
+ * The byte array `{0x81}` must be pushed using OP_1NEGATE. This can only occur for a 2-of-8 multisig, where the checkbits bit pattern is 10000001.
+ * Other cases will be pushed using no special opcode, i.e., using `0x01 <checkbits>`.
+* For 9 <= N <= 16, a length-2 byte array is to be pushed.
+ * The push will always be `0x02 LL HH`, where `LL` is the least significant byte of `checkbits`, and `HH` is the remaining high bits.
+* For 17 <= N <= 20, a length-3 byte array is to be pushed.
+ * The push will always be `0x03 LL II HH`, where `LL` is the least significant byte of `checkbits`, `II` is the next-least significant byte, and `HH` is the remaining high bits.
+
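+As a non-normative illustration of the encoding and push rules above, a Python sketch (function names are ours; the opcode byte values are the standard Script encodings):
+
+```python
+def encode_checkbits(signer_indices, n: int) -> bytes:
+    """Little-endian bitfield of floor((n+7)/8) bytes; bit i set = key i signs."""
+    bits = 0
+    for i in signer_indices:
+        bits |= 1 << i
+    return bits.to_bytes((n + 7) // 8, 'little')
+
+def push_checkbits(cb: bytes) -> bytes:
+    """Minimal push of the checkbits element, following the rules above."""
+    if cb == b'\x81':
+        return b'\x4f'                 # OP_1NEGATE
+    if len(cb) == 1 and 1 <= cb[0] <= 16:
+        return bytes([0x50 + cb[0]])   # OP_1 (0x51) through OP_16 (0x60)
+    return bytes([len(cb)]) + cb       # plain push: length byte, then data
+
+# e.g. a 2-of-3 where keys 0 and 2 sign: checkbits = 0b101
+assert encode_checkbits([0, 2], 3) == b'\x05'
+assert push_checkbits(b'\x05') == b'\x55'  # OP_5
+```
+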
+## ScriptSig size
+
+Wallets need to know ahead of time the maximum transaction size, in order to set the transaction fee.
+
+Let `R` be the length of the redeemScript and its push opcode, combined.
+
+The legacy mode scriptSig `<dummy> <sig0> ... <sigM>` can be as large as 73M + 1 + R bytes, which is the upper limit assuming all max-sized ECDSA signatures.
+
+In the new mode scriptSig `<checkbits> <sig0> ... <sigN>`, each Schnorr signature will contribute a fixed size of 66 bytes (including push opcode); however, the length of `checkbits` will vary somewhat. Wallets should allocate for fees based on the largest possible encoding (see the sketch after this list), which gives a scriptSig size of:
+* N <= 4: `checkbits` will always be pushed using OP_1 through OP_15, so always 66M + R + 1 bytes.
+* 5 <= N <= 8: `checkbits` may sometimes be pushed using a single-byte opcode, or may need to be pushed as `0x01 0xnn` -- up to 66M + R + 2 bytes.
+* 9 <= N <= 16: `checkbits` will be pushed as `0x02 0xnnnn` -- always 66M + R + 3 bytes.
+* 17 <= N <= 20: `checkbits` will be pushed as `0x03 0xnnnnnn` -- always 66M + R + 4 bytes.
+
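+For fee estimation, the bounds above can be collected into a small sketch (non-normative; names are ours):
+
+```python
+def max_new_mode_scriptsig_size(m: int, n: int, r: int) -> int:
+    """Worst-case new-mode scriptSig size in bytes, per the list above.
+    m, n: multisig parameters; r: redeemScript plus its push opcode."""
+    assert 0 < m <= n <= 20
+    if n <= 4:
+        checkbits_push = 1  # always a single OP_1..OP_15 opcode
+    elif n <= 8:
+        checkbits_push = 2  # worst case: 0x01 <byte>
+    elif n <= 16:
+        checkbits_push = 3  # 0x02 <2 bytes>
+    else:
+        checkbits_push = 4  # 0x03 <3 bytes>
+    return 66 * m + r + checkbits_push
+```
+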
+## Pubkey Encoding
+
+It is strongly recommended that wallets never create scripts with invalid pubkeys, even though this specification allows them to exist in the public key list as long as they are unused. It is possible that a future rule may stipulate that all pubkeys must be strictly encoded. If that were to happen, any outputs violating this rule would become unspendable.
+
+# Rationale and commentary on design decisions
+
+## Repurposing of dummy element
+
+In an earlier edition it was proposed to require N signature items (either a signature or NULL) for new mode instead of M items and a dummy element. The following problems inspired a move away from that approach:
+
+* Triggering mechanics for the new mode were somewhat of a kluge.
+* Some scripts rely on a certain expected stack layout. This is particularly the case for recently introduced high-level smart contracting languages that compile down to script, which reach deep into the stack using OP_PICK and OP_ROLL.
+
+That said, a scan of the blockchain only found about a hundred instances of scripts that would be impacted by stack layout changes. All were based on a template as seen in [this spend](https://blockchair.com/bitcoin-cash/transaction/612bd9fc5cb40501f8704028da76c4c64c02eb0ac80e756870dba5cf32650753), where OP_DEPTH was used to choose an OP_IF execution branch.
+
+## Use of a bitfield instead of a number
+
+Another draft of this specification proposed decoding the dummy element as a number, using the standard number decoding rules. The change to using a custom bitfield representation was motivated by the fact that the bitwise operators (OP_AND, OP_OR, OP_XOR) do not cleanly operate on bitcoin's numbers, since numbers are encoded using variable lengths whereas the bitwise operators require the operands to have equal lengths.
+
+The current specification guarantees that for a successful multisig in the new mode, the dummy element always has a specific length of either 1, 2, or 3, depending only on `N`. Smart contracts can use this property to perform a multisignature and then do bit inspection on which signatures were actually checked.
+
+## No mixing ECDSA / Schnorr
+
+Allowing mixed signature types might help alleviate the issue of supporting mixed wallet versions that do support / don't support Schnorr signatures.
+However, this would mean that an all-ECDSA signature list could be easily converted to the new mode, unless extra complicated steps were taken to prevent that conversion. As this is an undesirable malleability mechanism, we opted to simply exclude ECDSA from the new mode, just as Schnorr signatures are excluded from the legacy mode.
+
+# Implementation
+
+https://reviews.bitcoinabc.org/D3474
+
+# Acknowledgements
+
+Thanks to Tendo Pein, Rosco Kalis, Amaury Sechet, and Antony Zegers for valuable feedback.
diff --git a/doc/standards/2019-11-15-upgrade.md b/doc/standards/2019-11-15-upgrade.md
new file mode 100644
index 000000000..334982cf5
--- /dev/null
+++ b/doc/standards/2019-11-15-upgrade.md
@@ -0,0 +1,66 @@
+---
+layout: specification
+title: 2019-NOV-15 Network Upgrade Specification
+date: 2019-10-23
+category: spec
+activation: 1573819200
+version: 0.4
+---
+
+## Summary
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1573819200,
+Bitcoin Cash will execute an upgrade of the network consensus rules according to this specification.
+Starting from the next block, these consensus rule changes will take effect:
+
+* Enable Schnorr signatures for OP_CHECKMULTISIG(VERIFY).
+* Enforce minimal push and minimal number encoding rules in Script.
+
+The following are not consensus changes, but are recommended changes for Bitcoin Cash implementations:
+
+* Automatic replay protection for future upgrade
+
+## Schnorr Signatures for OP_CHECKMULTISIG(VERIFY)
+
+Use of Schnorr signatures is enabled in OP_CHECKMULTISIG(VERIFY). The dummy element is repurposed to flag
+Schnorr mode when it is non-null, and the order of signatures in Schnorr mode is constrained according to
+the bitfield encoded in the repurposed dummy element.
+
+Details can be found in the [full specification: 2019-11-15-schnorrmultisig.md](2019-11-15-schnorrmultisig.md).
+
+NOTE: The repurposing of the dummy element as a flag and bitfield supersedes the need for NULLDUMMY.
+
+## Enforce MINIMALDATA in Script.
+
+Enforce existing standardness checks that all executed data pushes use minimal push operators, and all numbers are encoded minimally,
+together known as the "MINIMALDATA" rule. This goes into effect at the consensus layer.
+
+Details can be found in the [full specification: 2019-11-15-minimaldata.md](2019-11-15-minimaldata.md).
+
+## Automatic Replay Protection
+
+The purpose of Automatic Replay Protection is to serve as a full node version-deprecation mechanism. It is intended to cause
+full validating nodes which do not upgrade to automatically separate themselves from the main network after the next
+upgrade on 15 May 2020. Nodes which implement the next upgrade will remove this automatic replay protection, and thus all regular
+wallets can continue using the default ForkID with no change to follow the main upgraded chain.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is less than UNIX timestamp 1589544000 (May 2020 upgrade)
+Bitcoin Cash full nodes MUST enforce the following rule:
+
+ * `forkid` [2] to be equal to 0.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1589544000
+(May 2020 upgrade) Bitcoin Cash full nodes implementing the Nov 2019 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid` [2] to be equal to `0xFFXXXX`, where `XXXX` is some arbitrary hex value.
+ ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software. Wallets that follow the upgrade
+should not have to change anything.
+
+## References
+
+[1] Median Time Past is described in [bitcoin.it wiki](https://en.bitcoin.it/wiki/Block_timestamp).
+It is guaranteed by consensus rules to be monotonically increasing.
+
+[2] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/2020-05-15-op_reversebytes.md b/doc/standards/2020-05-15-op_reversebytes.md
new file mode 100644
index 000000000..155260659
--- /dev/null
+++ b/doc/standards/2020-05-15-op_reversebytes.md
@@ -0,0 +1,165 @@
+---
+layout: specification
+title: OP_REVERSEBYTES Specification
+category: spec
+date: 2019-05-29
+activation: 1589544000
+version: 0.2
+author: Tobias Ruck
+---
+
+OP_REVERSEBYTES
+==========
+
+OP_REVERSEBYTES reverses the bytes of the top stackitem.
+
+Rationale
+---------
+
+Bitcoin's protocol almost exclusively uses little-endian encoding [8], and Script provides various tools for using integers encoded in little endian, such as `OP_NUM2BIN` and `OP_BIN2NUM` [11]. Using covenants [2], sophisticated smart contracts can be created, and Script already has a great arsenal of arithmetic operators (opcodes 139 to 165) to enforce e.g. how input and output amounts of transactions have to be related.
+
+However, many protocols do not use little endian encoding, and it is by no means clear that one is superior to the other. Both AMQP [12] and Apache Thrift [13], for instance, use big-endian encoding. The Simple Ledger Protocol (SLP) uses big-endian encoding as well [1]. Bitdb, when using the `hN` elements, returns stack items in a format that can be directly interpreted as base16 big-endian encoded numbers, and to use this feature, it has to be possible to encode values as big-endian.
+
+Further, now that oracles using OP_CHECKDATASIG are possible, likely used to retrieve numeric data, it would be unnecessarily limiting to assume all oracles will use little-endian encoding.
+
+Among the mentioned protocols, SLP tokens are likely the most important ones. Various new use cases combining the power of covenants and looping transactions [5] emerge, among them:
+
+* Decentralized exchanges (such as SLP Agora or SLPDEX) [3] [6] [4]
+* Donation mintable tokens
+* DAOs, which charge a fee for services and distribute revenue proportional to shares [7]
+* Native tokens (not yet possible)
+
+Note that values can be converted to big-endian encoding if the size of the encoding is both fixed and not too large. Currently, Script only supports 32-bit integers, and they can be encoded in big-endian using OP_SPLIT, OP_SWAP and OP_CAT:
+
+```
+// initial: // <value>
+// convert to little-endian
+PUSH 4 // <value> 4
+OP_NUM2BIN // <value 4-byte little endian>
+
+// split into individual bytes
+PUSH 1 // <value 4-byte little endian> 1
+OP_SPLIT // <value 1st byte> <value 2nd-4th byte>
+PUSH 1 // <value 1st byte> <value 2nd-4th byte> 1
+OP_SPLIT // <value 1st byte> <value 2nd byte> <value 3rd-4th byte>
+PUSH 1 // <value 1st byte> <value 2nd byte> <value 3rd-4th byte> 1
+OP_SPLIT // <value 1st byte> <value 2nd byte> <value 3rd byte> <value 4th byte>
+
+// reverse individual bytes and concat
+// results in 4-byte big endian
+OP_SWAP // <value 1st byte> <value 2nd byte> <value 4th byte> <value 3rd byte>
+OP_CAT // <value 1st byte> <value 2nd byte> <value 4th, 3rd byte>
+OP_SWAP // <value 1st byte> <value 4th, 3rd byte> <value 2nd byte>
+OP_CAT // <value 1st byte> <value 4th, 3rd, 2nd byte>
+OP_SWAP // <value 4th, 3rd, 2nd byte> <value 1st byte>
+OP_CAT // <value 4-byte big endian>
+```
+
+However, with OP_REVERSEBYTES, this becomes trivial:
+
+```
+// convert to bytes
+PUSH 4 // <SLP value> 4
+OP_NUM2BIN // <SLP value 4-byte little endian>
+OP_REVERSEBYTES // <SLP value 4-byte big endian>
+```
+
+That's 11 bytes (9 operations and 3 pushdata) saved.
+
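+For intuition, the same conversion sketched in Python (illustrative only; this assumes a non-negative value small enough that the sign bit is unused):
+
+```python
+value = 0x12345678
+le = value.to_bytes(4, 'little')  # what `4 OP_NUM2BIN` would leave on stack
+be = le[::-1]                     # what OP_REVERSEBYTES would produce
+assert be == value.to_bytes(4, 'big')
+```
+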
+There are multiple reasons why the second version would be preferable:
+
+* Covenants and looping scripts usually take the script code of the preimage [9] as input, which means every operation counts twice: Once for the stack item containing the script code, and once for the P2SH script stack item [10]. For a conversion to 8-byte big-endian, this would save 32 bytes per conversion, and if there's, say, three of those conversions in a script, it would already amount to 96 bytes - a non-trivial number of bytes for a transaction.
+* The cognitive load of developing scripts using the larger snippet above is increased unnecessarily. Developing scripts, by hand or by using tools such as macros or Spedn, already puts a lot of cognitive load on developers, and errors can be devastating to the community. A prominent example of such a failure is the contentious hard-fork on the Ethereum blockchain that was caused by a bug in The DAO smart contract.
+* The first version assumes that Script uses 32-bit numbers; however, once integers of larger width are implemented, the script grows linearly (4 bytes per byte) with each additional byte. For 256-bit numbers, it would require a whopping 124 bytes (93 operations and 31 pushdata) to convert to big-endian. As the opcode limit is currently 201, that wouldn't leave much room for other operations. In contrast, `<N> OP_NUM2BIN OP_REVERSEBYTES` always encodes integers as N-byte big-endian numbers, with a constant script size independent of N.
+
+Also, suppose an oracle returns an ordered list of 1-byte items (e.g. indices) but the script requires the bytes in reversed order; OP_REVERSEBYTES makes this trivial.
+
+### A Note On Signs
+
+For unsigned integers, the behavior is always the expected one: the number will be encoded as unsigned big-endian integer. However, as integers in Script are encoded rather curiously, signed integers might result in unexpected behavior:
+
+`-1 4 OP_NUM2BIN OP_REVERSEBYTES -> {0x80, 0x00, 0x00, 0x01}`
+
+Here, the sign bit is the first bit of the resulting stackitem. Usually, negative numbers are encoded in two's complement, in which case the number would be `{0xff, 0xff, 0xff, 0xff}`. However, as long as developers are aware of this quite Script-specific encoding, there's no issue at hand.
+
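+A Python sketch of the sign-magnitude encoding that produces this result (illustrative only; it ignores the corner case where the magnitude already occupies the top bit of the last byte):
+
+```python
+def num2bin(value: int, size: int) -> bytes:
+    """Script-style encoding: little-endian magnitude, sign bit in the
+    most significant bit of the last byte."""
+    raw = bytearray(abs(value).to_bytes(size, 'little'))
+    if value < 0:
+        raw[-1] |= 0x80
+    return bytes(raw)
+
+assert num2bin(-1, 4) == b'\x01\x00\x00\x80'
+assert num2bin(-1, 4)[::-1] == b'\x80\x00\x00\x01'  # as in the example above
+```
+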
+OP_REVERSEBYTES Specification
+-----------------------------
+
+This specification uses the same syntax for the stack/stackitems as [11].
+
+### Semantics
+
+`a OP_REVERSEBYTES -> b`.
+
+OP_REVERSEBYTES fails immediately if the stack is empty.
+
+Otherwise, the top stack item is removed from the stack, and a byte-reversed version is pushed onto the stack.
+
+Examples:
+
+* `{} OP_REVERSEBYTES -> {}`
+* `{0x01} OP_REVERSEBYTES -> {0x01}`
+* `{0x01, 0x02, 0x03, 0x04} OP_REVERSEBYTES -> {0x04, 0x03, 0x02, 0x01}`
+
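+In Python terms, the semantics amount to the following sketch (illustrative only):
+
+```python
+def op_reversebytes(stack: list) -> None:
+    if not stack:
+        raise ValueError('OP_REVERSEBYTES: stack is empty')  # fail immediately
+    stack.append(stack.pop()[::-1])
+
+stack = [bytes([0x01, 0x02, 0x03, 0x04])]
+op_reversebytes(stack)
+assert stack == [bytes([0x04, 0x03, 0x02, 0x01])]
+```
+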
+### Opcode Number
+
+OP_REVERSEBYTES proposes to use the previously unused opcode with number 188 (0xbc in hex encoding), which comes after the most recently added opcode, `OP_CHECKDATASIGVERIFY`.
+
+### Name
+
+The naming of this opcode turned out to become a bit of a bikeshed. In a previous proposal, this opcode was named `OP_REVERSE`. After that, it was renamed to `OP_BSWAP`, as that is a more technically accurate term, commonly used for reversing the byte order of integers [14] [15]. However, after some more consideration, it was renamed to `OP_ENDIAN_REVERSE` following Boost's nomenclature [16], then to `OP_REVERSEENDIAN` and finally to `OP_REVERSEBYTES`, which are both more consistent with Script's opcode naming system. However, as "endian" is usually used for numbers whose size is a power of two (which isn't the case for this opcode), `OP_REVERSEBYTES` is the preferred choice here.
+
+`OP_REVERSEBYTES` is preferable to `OP_BSWAP` because `OP_BSWAP` is lexically very similar to the already existing `OP_SWAP` and would make Script harder to read. Also, while the technical term for the instruction is indeed `bswap`, it isn't well known to developers of higher-level languages and could thus cause confusion that is avoided by using the name `OP_REVERSEBYTES`, which is more self-descriptive.
+
+### Activation
+
+The opcode will be activated during the 15th May 2020 hardfork.
+
+### Unit Tests
+
+The following unit tests are used by the ABC implementation of the opcode as of Feb 17th 2020.
+- `<item> OP_REVERSEBYTES` fails if 15th May 2020 protocol upgrade is not yet activated.
+- `OP_REVERSEBYTES` fails if the stack is empty.
+- `{} OP_REVERSEBYTES -> {}`
+- `{99} OP_REVERSEBYTES -> {99}`
+- `{0xde, 0xad} OP_REVERSEBYTES -> {0xad, 0xde}`
+- `{0xde, 0xad, 0xa1} OP_REVERSEBYTES -> {0xa1, 0xad, 0xde}`
+- `{0xde, 0xad, 0xbe, 0xef} OP_REVERSEBYTES -> {0xef, 0xbe, 0xad, 0xde}`
+- `{0x12, 0x34, 0x56} OP_REVERSEBYTES -> {0x56, 0x34, 0x12}`
+- for all n ∈ [0; 520]: `{i mod 256 | i < n} OP_REVERSEBYTES -> {(n - i - 1) mod 256 | i < n}`
+- for all n ∈ [0; 520]: `{(if i < (n + 1) / 2 then i else n - i - 1) mod 256 | i < n} OP_DUP OP_REVERSEBYTES OP_EQUAL -> OP_TRUE`
+
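+The two parametric vectors can be reproduced with a short Python sketch (assuming the divisions in the formulas above are integer divisions):
+
+```python
+for n in range(521):
+    data = bytes(i % 256 for i in range(n))
+    assert data[::-1] == bytes((n - i - 1) % 256 for i in range(n))
+    # palindrome case: OP_DUP OP_REVERSEBYTES OP_EQUAL -> OP_TRUE
+    pal = bytes((i if i < (n + 1) // 2 else n - i - 1) % 256 for i in range(n))
+    assert pal[::-1] == pal
+```
+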
+References
+----------
+
+[1] SLP Token specification: https://github.com/simpleledger/slp-specifications/blob/master/slp-token-type-1.md
+
+[2] Spending constraints with OP_CHECKDATASIG: https://honest.cash/pein_sama/spending-constraints-with-op_checkdatasig-172
+
+[3] SLP Agora: https://github.com/EyeOfPython/slpagora
+
+[4] Sample SLPDEX transaction: https://blockchair.com/bitcoin-cash/transaction/2e69f47a985673c5a645e20ad09025a0892321f096224679657f98e6152c845c
+
+[5] Let's play chess on the BCH Blockchain: https://tobiasruck.com/content/lets-play-chess-on-bch/
+
+[6] SLPDEX (discontinued): slpdex.cash
+
+[7] DAO: https://en.wikipedia.org/wiki/Decentralized_autonomous_organization
+
+[8] Bitcoin protocol documentation, common structures: https://en.bitcoin.it/wiki/Protocol_documentation#Common_structures
+
+[9] BIP143: https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
+
+[10] BIP16: https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki
+
+[11] May 2018, reenabled opcodes: https://github.com/EyeOfPython/bitcoincash.org/blob/master/spec/may-2018-reenabled-opcodes.md
+
+[12] AMQP specification, page 14: http://www.amqp.org/sites/amqp.org/files/amqp.pdf
+
+[13] Apache Thrift binary protocol: https://github.com/apache/thrift/blob/master/doc/specs/thrift-binary-protocol.md
+
+[14] https://docs.rs/bswap/1.0.0/bswap/
+
+[15] https://www.npmjs.com/package/bswap
+
+[16] https://www.boost.org/doc/libs/1_63_0/libs/endian/doc/conversion.html#endian_reverse
diff --git a/doc/standards/2020-05-15-sigchecks-plotblocks.png b/doc/standards/2020-05-15-sigchecks-plotblocks.png
new file mode 100644
index 000000000..96aff1ccb
Binary files /dev/null and b/doc/standards/2020-05-15-sigchecks-plotblocks.png differ
diff --git a/doc/standards/2020-05-15-sigchecks-plotinputs.png b/doc/standards/2020-05-15-sigchecks-plotinputs.png
new file mode 100644
index 000000000..0417e1e00
Binary files /dev/null and b/doc/standards/2020-05-15-sigchecks-plotinputs.png differ
diff --git a/doc/standards/2020-05-15-sigchecks.md b/doc/standards/2020-05-15-sigchecks.md
new file mode 100644
index 000000000..bcaf73940
--- /dev/null
+++ b/doc/standards/2020-05-15-sigchecks.md
@@ -0,0 +1,167 @@
+---
+layout: specification
+title: 2020-MAY-15 script SigChecks counting and limiting specification
+date: 2020-03-05
+category: spec
+activation: 1589544000
+version: 0.2 (DRAFT)
+author: Mark B. Lundeberg
+---
+
+# Summary
+
+Bitcoin Cash's SigOps counting and limiting system will be replaced with a new system, referred to as SigChecks.
+
+# Motivation
+
+Since early days, Bitcoin has had a SigOps counting rule for limiting the amount of CPU usage possible in a given transaction or block, based on the principle that signature verifications are by far the most CPU-intense operations.
+
+Although partly effective, there are well-known issues with sigops, which mainly stem from the fact that SigOps are judged by parsing scripts, rather than executing them.
+Bitcoin splits scripts across two transactions (the scriptPubKey of the transaction that creates a coin, and the scriptSig of the transaction that spends it), yet the actual CPU work of verifying a transaction happens solely in the spending transaction. This leads to some paradoxical situations: a transaction or block with a high sigops count might involve very little CPU work, and conversely a transaction with a low sigops count may require very high CPU work.
+
+The essential idea of SigChecks is to perform counting solely in the spending transaction, and count actual executed signature check operations.
+
+# Specification
+
+## Counting rule
+
+The SigChecks count for a given script is discovered during execution of the script.
+
+- Executing OP_CHECKSIG / OP_CHECKSIGVERIFY / OP_CHECKDATASIG / OP_CHECKDATASIGVERIFY increments SigChecks by:
+ - +0, if signature is NULL.
+ - +1, if signature is non-NULL.
+- Executing an M-of-N OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY increments SigChecks by:
+ - +0, if all M signatures are NULL.
+ - +M, if at least one signature is non-NULL and the verification is in [New/Schnorr mode](2019-11-15-schnorrmultisig.md) (dummy element is non-NULL).
+ - +N, if at least one signature is non-NULL and the verification is in Old/ECDSA mode (dummy element is NULL).
+
+Here NULL means a script stack element that has length 0; passing NULL in place of an expected signature is the canonical way of cancelling the signature check, i.e., making the signature checking opcode fail / return false (and the only permitted way to cause this result ever since the NULLFAIL rule activated).
+
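+A non-normative Python sketch of the counting rule above (names are ours):
+
+```python
+def checksig_sigchecks(sig: bytes) -> int:
+    """OP_CHECKSIG / OP_CHECKDATASIG (and their VERIFY variants)."""
+    return 0 if len(sig) == 0 else 1   # NULL signature counts 0
+
+def checkmultisig_sigchecks(sigs: list, n: int, schnorr_mode: bool) -> int:
+    """M-of-N OP_CHECKMULTISIG(VERIFY); m = len(sigs)."""
+    if all(len(s) == 0 for s in sigs):
+        return 0                       # all-NULL: no checks counted
+    return len(sigs) if schnorr_mode else n  # +M in new mode, +N in legacy
+```
+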
+## Per-block limitation (consensus rule)
+
+After activation, any block where the total number of SigChecks accumulated (during all script executions - scriptSig, scriptPubKey, and P2SH redeemScript - in all inputs excepting coinbase) violates the following limit based on the maximum block size shall be rejected:
+
+ block_SigChecks <= max_Blocksize // 141
+
+Here, max_Blocksize refers to the consensus limit that is enforced on the full serialized block size (including block header, transaction counter, and all serialized transactions).
+
+Currently, `max_Blocksize` = 32000000, so the maximum allowed `block_SigChecks` would be 226950 for all blocks. However, in future block size increases, it should be assumed that the SigChecks limit increases proportionally.
+
+## Per-transaction limits (consensus rule)
+
+After activation, any transaction where the total number of SigChecks accumulated (during all script executions - scriptSig, scriptPubKey, and P2SH redeemScript - in all inputs excepting coinbase) violates the following limit shall be rejected:
+
+ transaction_SigChecks <= 3000
+
+## Per-input limitation (standardness rule)
+
+For a given transaction input, the number of SigChecks accumulated during all script execution (scriptSig, scriptPubKey, and P2SH redeemScript) is to be limited according to the byte-length of the scriptSig, `len_scriptSig`:
+
+ txin_SigChecks <= (len_scriptSig + 60) // 43, where // indicates floor division.
+ - or equivalently -
+ len_scriptSig >= 43 * txin_SigChecks - 60
+
+Any transaction that contains an input violating this limit should be rejected from nodes' mempools and not relayed. However, blocks may contain inputs that violate this limit.
+
+This is only a non-consensus standardness (transaction relay) rule, meaning that transactions within blocks do not need to obey this rule. Nodes should only enforce this rule starting at the activation time, and if any transactions in mempool violate this rule at precisely the time of activation, they should be ejected.
+
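+Collecting the three limits into one illustrative sketch (function names are ours, not from any implementation):
+
+```python
+def block_sigchecks_ok(block_sigchecks: int, max_blocksize: int) -> bool:
+    return block_sigchecks <= max_blocksize // 141        # consensus
+
+def tx_sigchecks_ok(tx_sigchecks: int) -> bool:
+    return tx_sigchecks <= 3000                           # consensus
+
+def input_sigchecks_standard(len_scriptsig: int, txin_sigchecks: int) -> bool:
+    return txin_sigchecks <= (len_scriptsig + 60) // 43   # standardness only
+
+assert block_sigchecks_ok(226_950, 32_000_000)            # current maximum
+```
+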
+## Removal of SigOps
+
+After the activation, nodes shall disable all consensus rules and all standardness rules relating to the old SigOps counting mechanism. There are four such rules:
+
+- The consensus limit of 20000 sigops per MB of block shall be disabled.
+- The consensus limit of 20000 sigops per transaction shall be disabled.
+- The standardness limit of 4000 sigops per transaction shall be disabled.
+- The standardness limit of 15 sigops per P2SH input shall be disabled.
+
+## Notes
+
+- The question of whether all signatures are null is not precisely the inverse of whether the opcode returns true/false to stack: consider the case of 0-of-N OP_CHECKMULTISIG, which always returns true, yet also has "all null" signatures. Also, historically pre-NULLFAIL opcodes would return false for non-null invalid signatures, instead of failing.
+
+# Rationale and commentary on design decisions
+
+## Counting rule
+
+The proposed counting rule is easy to implement, but it's not the simplest / most obvious approach. There is one primary design feature we wanted to ensure: *The proposed counting rule means that the sigchecks count can be evaluated by executing a script with a 'dummy'/deferred signature verifier, i.e., without performing any CPU-intensive elliptic curve math.*
+
+We currently have the NULLFAIL rule, which means that signature check opcodes will either:
+- fail with error, because the non-null signatures were not valid, or there was a mixture of null / non-null signatures, or because the checked public keys were incorrectly encoded, or some other reason.
+- return true, if all signatures are non-null and valid, or,
+- return false, if all signatures are null and there is at least 1 signature.
+
+Right now, nodes can optionally use this fact to defer public key and signature checks until after script execution, simply placing true/false on stack depending on whether the signatures are null or not, and continuing execution as if the checks were done.
+Later, after the script has finished executing successfully, the deferred checks can be finally executed to determine whether the script should in fact be failed entirely.
+This deferment allows some efficiency advantages (like allowing Schnorr batch validation, fast rejection of some invalid blocks/transactions, etc.).
+
+The simplest imaginable rule would be to only count signature check function calls that are actually done.
+The main problem with this approach is that M-of-N ECDSA multisig verifications perform a variable number of signature checks, at least M but as many as N.
+Some of these checks fail, some succeed. The count would then be only determinable by actually performing full signature checks.
+With the deferment mentioned above, this would mean that any limits on sigchecks could not be accurately enforced before actually carrying out the signature checks.
+
+A secondary aspect of counting is that when all signatures are null, we assign a sigchecks count of 0.
+This is a rare case since most scripts want only valid signatures anyway.
+However, it does increase accuracy of the count, and it can be useful in smart contracting to use null signatures instead of booleans to control branching flows (booleans pushed from scriptSig can be malleated).
+Since it is easy to implement the 0 sigchecks counting and it's more accurate that way, we decided to include this.
+
+## Why have limits?
+
+The SigOps and SigChecks limits exist solely to limit the impact of denial of service attacks. There are a variety of attacks that might occur, but these are the main ones:
+- An attacking miner can craft valid/invalid blocks packed full with valid and CPU-intensive non-standard scripts that would require huge amounts of time (perhaps hours) to validate.
+- Anyone may flood the mempool with valid but CPU-intensive transactions. Since these are valid, they will be propagated to all nodes 'for free' and load down the network.
+
+While these might sound bad, it's worth noting that the disruption would be temporary.
+The mempool and block attack vectors are essentially decoupled since efficient nodes use transaction validity caching: if they have accepted a transaction already, they don't need to re-verify it when they see it mined in a block.
+Also, CPU-intensive blocks do not cause any kind of "permanent damage" to new nodes coming online, since again efficient nodes typically provide for an 'assume-valid' setting that only requires fully verifying recent blocks.
+
+*Blocks*:
+Slow blocks can be made without any setup, but the slowest possible block requires many setup blocks to be mined beforehand to generate attack outputs.
+These attack outputs would then all be spent in the attack block.
+Since scripts are limited to 201 opcodes and inputs are at least 41 bytes, this could achieve about 5 signature checks for every byte in the spending block, or 160 million signature checks with today's maximum block size.
+As a rough rule of thumb, each signature check takes 50 microseconds, so such a block would take a couple of CPU-hours to validate (though this is trivially parallelized).
+The proposed limit of 141 bytes / sigcheck cuts the worst case down by a factor of 700.
+The main motivation here isn't just to ensure nondisruption with current block sizes, but also to make sure that future block size increases can be made without needing to worry so much about slow block attacks.
+
+*Mempool*:
+As far as mempool attacks go, these currently are already greatly limited by standardness rules on mainnet that 1) whitelist only certain allowed output script templates and 2) limit P2SH to 15 sigops.
+If either rule were simply removed, it would permit abusive scripts that perform a large number of verifications in a tight space.
+Since we are planning to remove sigops, then something needs to go in place of that P2SH sigops rule.
+Besides limiting the density of CPU usage, it also makes sense to limit the density of signature checks in transactions as a *support* for the block limit: we don't want the mempool to fill up with high-sigchecks transactions that take ages to clear out (since each block can only consume so many of them).
+
+It's worth pointing out some of the indirect limits that are created as a result:
+- As mentioned above it is impossible for the number of SigChecks in an input to exceed 201, which is the current limit on the 'opcode count' for a single script.
+ - However, a mainnet standard transaction cannot have a scriptSig longer than 1650 bytes, which means an input in a standard transaction won't be able to have more than 39 SigChecks.
+- The per-input rule means that the overall density of SigChecks in a standard transaction cannot exceed 33.5 bytes / SigCheck. This occurs with many inputs each having two SigChecks in a scriptSig of length 26, i.e., an input of size 26+41 = 67 bytes.
+ - Due to additional script template standardness rules on mainnet, it is practically not possible to produce such a short scriptSig containing two sigchecks. So, practically one can only achieve 36.67 bytes/SigCheck (three SigChecks in a scriptSig of length 69), using 1-of-3 bare multisignatures or some P2SH tricks.
+- Likewise standard transactions on mainnet are limited to 100000 bytes, so a standard transaction won't be able to have more than 3000 sigchecks.
+
+## Choice of numbers
+
+The numbers proposed for the per-input and per-block limits are based on an examination of current typical uses, and an examination of the historical blockchain.
+
+The per-input limit is designed to support the two most extreme standard use cases, which deserve continuing support and (though rare) are still used occasionally:
+
+* Spending a bare 1-of-3 multisignature in ECDSA mode will have 3 SigChecks in around 73 bytes. (Bare multisigs like 1-of-4 and beyond are nonstandard to fund.)
+* Spending a P2SH 1-of-15 multisignature in ECDSA mode will have 15 SigChecks in around 589 bytes.
+
+The proposed per-input rule is a line interpolating between those two cases, with a spare allowance of 4 bytes for each (since ECDSA signatures are variable size, and very rarely are they shorter than this in normal usage).
+
+Typical use cases have much, much lower density than these. P2PK and P2PKH have 1 SigCheck in ~70 bytes and ~105 bytes respectively, and most P2SH multisignatures are 2-of-3 spent with ECDSA, which have 3 SigChecks in a ~250 byte scriptSig. The common standard use cases are plotted below; as can be seen, they all fall well within the proposed limit.
+
+![Input sigchecks plotted for various standard scripts](2020-05-15-sigchecks-plotinputs.png)
+
+The block limit is based on an examination of normal usage patterns and observations on historical blocks. Historically, the bulk (75%) of blocks have had a density of between 150 and 250 bytes/SigCheck, and the average density of the whole chain is 176 bytes/SigCheck. Only 2% of blocks have been more dense than 141 bytes/SigCheck. This matches the fact that the vast majority of inputs/outputs are P2PKH, which on the whole (considering funding and spending) have a density of around 182 bytes/SigCheck. Rarely, one sees a block that is packed full of an unusually high fraction of P2SH 2-of-3 multisignature consolidations, which pushes down to the 100 bytes/SigCheck level. Blocks more dense than 98 bytes/SigCheck have been extremely rare, making up 0.01% of blocks.
+
+The exact number of 141 bytes/SigCheck comes from considering a fairly common use case, which is consolidating many P2PKH inputs. If done with Schnorr signatures then each input is 141 bytes and one SigCheck.
+
+The choice of 141 bytes/SigCheck for a block is ~4 times more aggressive than the ~36.67 bytes/SigCheck standardness rule. It's worth emphasizing, however, that this block limit is based on the maximum block size. Thus, it may happen that a normally mined block has an actual density of ~36.67 bytes/SigCheck; however, such a block could not be more than ~1/4th of the maximum block byte size.
+
+A histogram of historical block densities is plotted below:
+![Block sigchecks density historically (up to mid-2019)](2020-05-15-sigchecks-plotblocks.png)
+
+# Implementation
+
+**Implementation information to be added - TBD**
+
+# Acknowledgements
+
+Thanks to Amaury Sechet, Josh Green, Tobias Ruck, Tyler Smith, Calin Culianu, and Andrew Stone for valuable feedback.
diff --git a/doc/standards/2020-05-15-upgrade.md b/doc/standards/2020-05-15-upgrade.md
new file mode 100644
index 000000000..d292bd567
--- /dev/null
+++ b/doc/standards/2020-05-15-upgrade.md
@@ -0,0 +1,72 @@
+---
+layout: specification
+title: 2020-MAY-15 Network Upgrade Specification
+date: 2020-04-26
+category: spec
+activation: 1589544000
+version: 0.4
+---
+
+## Summary
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1589544000 (May 15th, 2020, 12:00PM UTC),
+Bitcoin Cash will execute an upgrade of the network consensus rules according to this specification.
+Starting from the next block, these consensus rule changes will take effect:
+
+* Bitcoin Cash's SigOps counting and limiting system is replaced with a new system, referred to as SigChecks.
+* A new opcode called OP_REVERSEBYTES has been added to the script system.
+* Enforcement of the Infrastructure Funding Plan, subject to activation by [BIP 9](https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki) miner signalling.
+
+The following are not consensus changes, but are recommended policy changes for Bitcoin Cash implementations:
+
+* The default for max number of in-mempool ancestors is changed from 25 to 50.
+* The default for max number of in-mempool descendants is changed from 25 to 50.
+* Automatic replay protection for future upgrade.
+
+## SigChecks
+
+Enforcement of sigops limits is removed, and replaced with new limits based on the number of signature checks that are actually executed when running a script. This new system is called SigChecks.
+
+Details can be found in the [full specification: SigChecks](https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/2020-05-15-sigchecks.md).
+
+## OP_REVERSEBYTES
+
+This new opcode reverses the order of bytes in a string. It can be used to change endianness.
+
+Details can be found in the [full specification: OP_REVERSEBYTES](https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/2020-05-15-op_reversebytes.md).
+
+## Infrastructure Funding Plan
+
+The purpose of the Infrastructure Funding Plan (IFP) is to provide funding to development projects working on common Bitcoin Cash infrastructure.
+If activated, it enforces that 5% of the block reward is spent to one of a set of specified addresses.
+Activation is triggered via [BIP 9](https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki) version bits signalling prior to the May 15 upgrade.
+
+More details can be found in the [full specification](https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/2020-05-15-ifp.md).
+
+## Automatic Replay Protection
+
+The purpose of Automatic Replay Protection is to serve as a full node version-deprecation mechanism. It is intended to cause
+full validating nodes which do not upgrade to automatically separate themselves from the main network after the next
+upgrade on 15 November 2020. Nodes which implement the next upgrade will remove this automatic replay protection, and thus all regular
+wallets can continue using the default ForkID with no change to follow the main upgraded chain.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is less than UNIX timestamp 1605441600 (Nov 2020 upgrade)
+Bitcoin Cash full nodes MUST enforce the following rule:
+
+ * `forkid` [2] to be equal to 0.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1605441600
+(Nov 2020 upgrade) Bitcoin Cash full nodes implementing the May 2020 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid` [2] to be equal to `0xFFXXXX`, where `XXXX` is some arbitrary hex value.
+ ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software. Wallets that follow the upgrade
+should not have to change anything.
+
+## References
+
+[1] Median Time Past is described in [bitcoin.it wiki](https://en.bitcoin.it/wiki/Block_timestamp).
+It is guaranteed by consensus rules to be monotonically increasing.
+
+[2] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/2020-11-15-asert.md b/doc/standards/2020-11-15-asert.md
new file mode 100644
index 000000000..090232fb1
--- /dev/null
+++ b/doc/standards/2020-11-15-asert.md
@@ -0,0 +1,424 @@
+---
+layout: specification
+title: ASERT Difficulty Adjustment Algorithm (aserti3-2d)
+date: 2020-08-17
+category: spec
+activation: 1605441600
+version: 0.6.3
+author: freetrader, Jonathan Toomim, Calin Culianu, Mark Lundeberg, Tobias Ruck
+---
+
+## Summary
+
+Activation of a new difficulty adjustment algorithm 'aserti3-2d'
+(or 'ASERT' for short) for the November 2020 Bitcoin Cash upgrade. Activation will be
+based on MTP, with the last pre-fork block used as the anchor block.
+
+## Motivation
+
+- To eliminate periodic oscillations in difficulty and hashrate
+- To reduce the difference in profitability between steady miners and
+ those who switch to mining other blockchains.
+- To maintain average block intervals close to the 10 minute target.
+- To bring the average transaction confirmation time close to target time.
+
+## Technical background
+
+The November 2017 Bitcoin Cash upgrade introduced a simple moving average as
+difficulty adjustment algorithm. This change unfortunately introduced daily
+periodic difficulty oscillations, which resulted in long confirmation times
+followed by a burst of rapid blocks. This harms the user experience of Bitcoin
+Cash, and punishes steady hashrate miners.
+
+Research into the family of difficulty algorithms based on an exponential
+moving average (EMA) resulted in ASERT (Absolutely Scheduled Exponentially
+Rising Targets) [1], which was developed by Mark Lundeberg in 2019 and
+fully described by him in 2020. An equivalent formula was independently
+discovered in 2018 by Jacob Eliosoff and in 2020 by Werner et al. [6].
+
+ASERT does not have the same oscillations as the DAA introduced in the November
+2017 upgrade and has a range of other attractive qualities such as robustness
+against singularities [15] without a need for additional rules, and absence of
+accumulation of rounding/approximation errors.
+
+In extensive simulation against a range of other stable algorithms [2],
+an ASERT algorithm performed best across criteria that included:
+
+- Average block times closest to an ideal target time of 600 seconds.
+- Average transaction confirmation times closest to the target time.
+- Reducing the advantage of non-steady mining strategies, thereby maximizing
+ the relative profitability of steady mining.
+
+## Specification
+
+### Terms and conventions
+
+* Fork block: The first block mined according to the new consensus rules.
+* Anchor block: The parent of the fork block.
+
+
+### Requirements
+
+#### Target computation
+The current block's target bits are calculated by the following algorithm.
+
+The aserti3-2d algorithm can be described by the following formula:
+
+```
+next_target = anchor_target * 2**((time_delta - ideal_block_time * (height_delta + 1)) / halflife)
+```
+
+where:
+
+- `anchor_target` is the unsigned 256 bit integer equivalent of the `nBits` value in
+ the header of the anchor block.
+- `time_delta` is the difference, in signed integer seconds, between the
+ timestamp in the header of the current block and the timestamp in the
+ parent of the anchor block.
+- `ideal_block_time` is a constant: 600 seconds, the targeted
+ average time between blocks.
+- `height_delta` is the difference in block height between the current
+ block and the anchor block.
+- `halflife` is a constant parameter sometimes referred to as
+ 'tau', with a value of 172800 (seconds) on mainnet.
+- `next_target` is the integer value of the target computed for the block
+ after the current block.
+
+The algorithm below implements the above formula using fixed-point integer
+arithmetic and a cubic polynomial approximation to the 2^x term.
+
+The 'target' values used as input and output are the compact representations
+of actual 256-bit integer targets as specified for the 'nBits' field in the
+block header.
+
+
+Python code, using Python 3 syntax:
+
+```python
+def next_target_aserti3_2d(
+ anchor_height: int, # height of the anchor block.
+ anchor_parent_time: int, # timestamp (nTime) of the parent of the anchor block.
+ anchor_bits: int, # 'nBits' value of the anchor block.
+ current_height: int, # height of the current block.
+ current_time: int, # timestamp of the current block.
+) -> int: # 'target' nBits for the block after the current block.
+ ideal_block_time = 600 # in seconds
+ halflife = 172_800 # 2 days (in seconds)
+ radix = 2**16 # 16 bits for decimal part of fixed-point integer arithmetic
+ max_bits = 0x1d00_ffff # maximum target in nBits representation
+ max_target = bits_to_target(max_bits) # maximum target as integer
+
+ anchor_target = bits_to_target(anchor_bits)
+ time_delta = current_time - anchor_parent_time
+ height_delta = current_height - anchor_height # can be negative
+    # `//` is floored division (int.__floordiv__) - see note 3 below
+    exponent = ((time_delta - ideal_block_time * (height_delta + 1)) * radix) // halflife
+
+ # Compute equivalent of `num_shifts = math.floor(exponent / 2**16)`
+ num_shifts = exponent >> 16
+
+ exponent = exponent - num_shifts * radix
+ factor = ((195_766_423_245_049 * exponent +
+ 971_821_376 * exponent**2 +
+ 5_127 * exponent**3 +
+ 2**47) >> 48) + radix
+ next_target = anchor_target * factor
+
+    # Calculate `next_target = math.floor(next_target * 2**num_shifts)`
+ if num_shifts < 0:
+ next_target >>= -num_shifts
+ else:
+ # Implementations should be careful of overflow here (see note 6 below).
+ next_target <<= num_shifts
+
+ next_target >>= 16
+ if next_target == 0:
+ return target_to_bits(1) # hardest valid target
+
+ if next_target > max_target:
+ return max_bits # limit on easiest target
+ return target_to_bits(next_target)
+```
+
+Note 1: The reference implementations make use of signed integer arithmetic.
+ Alternative implementations may use strictly unsigned integer
+ arithmetic.
+
+Note 2: All implementations should strictly avoid use of floating point
+ arithmetic in the computation of the exponent.
+
+Note 3: In the calculation of the exponent, floored integer division [7, 10]
+        must be used, as indicated by the `//` division operator (`int.__floordiv__`).
+
+Note 5: The convenience functions `bits_to_target()` and `target_to_bits()`
+ are assumed to be available for conversion between compact 'nBits'
+ and unsigned 256-bit integer representations of targets.
+ Examples of such functions are available in the C++ and Python3
+ reference implementations.
+
+Note 6: If a limited-width integer type is used for `next_target`, then the `<<`
+ operator may cause an overflow exception or silent discarding of
+ most-significant bits.
+ Implementations must detect and handle such cases to correctly emulate
+ the behaviour of an unlimited-width calculation. Note that if the result
+ at this point would exceed `radix * max_target` then `max_bits` may be returned
+ immediately.
+
+Note 7: The polynomial approximation that computes `factor` must be performed
+ with 64 bit unsigned integer arithmetic or better. It *will*
+ overflow a signed 64 bit integer. Since exponent is signed, it may be
+ necessary to cast it to unsigned 64 bit integer. In languages like
+ Java where long is always signed, an unsigned shift `>>> 48` must be
+ used to divide by 2^48.
+
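+As a quick standalone sanity check of the fixed-point core (not part of the specification): at a fractional exponent of 0 the factor equals `radix`, so an on-schedule block leaves the target unchanged, and the approximation hits the `f(0) == 1`, `f(1) == 2` endpoints discussed in the rationale below.
+
+```python
+radix = 2**16
+
+def factor(exponent: int) -> int:
+    # cubic 2**x approximation, as in the reference code above
+    return ((195_766_423_245_049 * exponent +
+             971_821_376 * exponent**2 +
+             5_127 * exponent**3 +
+             2**47) >> 48) + radix
+
+assert factor(0) == radix          # 2**0 == 1: on-schedule target unchanged
+assert factor(radix) == 2 * radix  # 2**1 == 2: one halflife doubles the target
+```
+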
+
+#### Activation
+
+The ASERT algorithm will be activated according to the top-level upgrade spec [3].
+
+#### Anchor block
+
+ASERT requires the choice of an anchor block to schedule future target
+computations.
+
+The first block with an MTP that is greater/equal to the upgrade activation time
+will be used as the anchor block for subsequent ASERT calculations.
+
+This corresponds to the last block mined under the pre-ASERT DAA rules.
+
+Note 1: The anchor block is the block whose height and target
+ (nBits) are used as the 'absolute' basis for ASERT's
+ scheduled target. The timestamp (nTime) of the anchor block's
+ *parent* is used.
+
+Note 2: The height, timestamp, and nBits of this block are not known ahead of
+ the upgrade. Implementations MUST dynamically determine it across the
+ upgrade. Once the network upgrade has been consolidated by
+ sufficient chain work or a checkpoint, implementations can simply
+ hard-code the known height, nBits and associated (parent) timestamp
+        of this anchor block. Implementations MAY also hard-code other equivalent
+ representations, such as an nBits value and a time offset from the
+ genesis block.
+
+
+#### REQ-ASERT-TESTNET-DIFF-RESET (testnet difficulty reset)
+
+On testnet, an additional rule will be included: Any block with a timestamp
+that is more than 1200 seconds after its parent's timestamp must use an
+nBits value of `max_bits` (`0x1d00ffff`).
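+
+A sketch of this rule, layered in front of the normal ASERT computation
+(`max_bits` as defined in the pseudocode above):
+
+```python
+def testnet_next_bits(parent_time, block_time, normal_bits,
+                      max_bits=0x1D00FFFF):
+    if block_time > parent_time + 1200:
+        return max_bits  # >20-minute gap: difficulty resets to the minimum
+    return normal_bits
+```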
+
+
+## Rationale and commentary on requirements / design decisions
+
+1. Choice of anchor block determination
+
+ Choosing an anchor block that is far enough in the past would result
+ in slightly simpler coding requirements but would create the possibility
+ of a significant difficulty adjustment at the upgrade.
+
+ The last block mined according to the old DAA was chosen since this block is
+ the most proximal anchor and allows for the smoothest transition to the new
+ algorithm.
+
+2. Avoidance of floating point calculations
+
+ Compliance with IEEE-754 floating point arithmetic is not generally
+ guaranteed by programming languages on which a new DAA needs to be
+ implemented. This could result in floating point calculations yielding
+ different results depending on compilers, interpreters or hardware.
+
+ It is therefore highly advised to perform all calculations purely using
+ integers and precisely specified operations, so that identical difficulty
+ targets are enforced across all implementations.
+
+3. Choice of half-life
+
+ A half-life of 2 days (`halflife = 2 * 24 * 3600`), equivalent to an e^x-based
+ time constant of `2 * 144 * ln(2)` or aserti3-415.5, was chosen because it reaches
+ near-optimal performance in simulations by balancing the need to buffer
+ against statistical noise and the need to respond rapidly to swings in price
+ or hashrate, while also being easy for humans to understand: for every 2 days
+ that a block's timestamp falls ahead of schedule, the difficulty doubles.
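+
+ A quick worked check of this property (a sketch; the floating point here
+ is illustrative only and never appears in consensus code):
+
+```python
+halflife = 2 * 24 * 3600           # 172800 seconds
+time_ahead = halflife              # timestamp 2 days ahead of schedule
+exponent = -time_ahead / halflife  # == -1.0
+assert 2**exponent == 0.5          # target halves -> difficulty doubles
+```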
+
+4. Choice of approximation polynomial
+
+ The DAA is part of a control system feedback loop that regulates hashrate,
+ and the exponential function and its integer approximation comprise its
+ transfer function. As such, standard guidelines for ensuring control system
+ stability apply. Control systems tend to be far more sensitive to
+ differential nonlinearity (DNL) than integral nonlinearity (INL) in their
+ transfer functions. Our requirements were to have a transfer function that
+ was (a) monotonic, (b) contained no abrupt changes, (c) had precision and
+ differential nonlinearity that was better than our multi-block statistical
+ noise floor, (d) was simple to implement, and (e) had integral nonlinearity
+ that was no worse than our single-block statistical noise floor.
+
+ A simple, fast to compute cubic approximation of 2^x for 0 <= x < 1 was
+ found to satisfy all of these requirements. It maintains an absolute error
+ margin below 0.013% over this range [8]. In order to address the full
+ (-infinity, +infinity) domain of the exponential function, we found the
+ `2**(x + n) = 2**n * 2**x` identity to be of use. Our cubic approximation gives
+ the exactly correct values `f(0) == 1` and `f(1) == 2`, which allows us to
+ use this identity without concern for discontinuities at the edges of the
+ approximation's domain.
+
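+ A quick offline error check of the cubic (a sketch; floating point is used
+ here only for measurement, never in consensus code), using the constants
+ from the pseudocode above:
+
+```python
+A, B, C = 195_766_423_245_049, 971_821_376, 5_127
+
+def cubic_2x(x):
+    e = x * 2**16                  # map x in [0, 1) into fixed point
+    return 1.0 + (A * e + B * e**2 + C * e**3) / 2**64
+
+worst = max(abs(cubic_2x(i / 1000) - 2**(i / 1000)) / 2**(i / 1000)
+            for i in range(1000))
+print(f"worst relative error ~ {worst:.5%}")  # on the order of 0.013% [8]
+```
+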
+ First, there is the issue of DNL. Our goal was to ensure that our algorithm
+ added no more than 25% as much noise as is inherent in our dataset. Our
+ algorithm is effectively trying to estimate the characteristic hashrate over
+ the recent past, using a 2-day (~288-block) half-life. Our expected
+ exponential distribution of block intervals has a standard deviation (stddev)
+ of 600 seconds. Over a 2-day half-life, our noise floor in our estimated
+ hashrate should be about `sqrt(1 / 288) * 600` seconds, or 35.3 seconds. Our
+ chosen approximation method is able to achieve precision of 3 seconds in most
+ circumstances, limited in two places by 16-bit operations:
+ `172800 sec / 65536 = 2.6367 sec`
+ Our worst-case precision is 8 seconds, and is limited by the worst-case
+ 15-bit precision of the nBits value. This 8 second worst-case is not within
+ the scope of this work to address, as it would require a change to the block
+ header. Our worst-case step size is 0.00305%,[11] due to the worst-case
+ 15-bit nBits mantissa issue. Outside the 15-bit nBits mantissa range, our
+ approximation has a worst-case precision of 0.0021%. Overall, we considered
+ this to be satisfactory DNL performance.
+
+ Second, there is the issue of INL. Simulation testing showed that difficulty
+ and hashrate regulation performance was remarkably insensitive to
+ integral non-linearity. We found that even the use of `f(x) = 1 + x` as an
+ approximation of `2**x` in the `aserti1` algorithm was satisfactory when
+ coupled with the `2**(x + n) = 2**n * 2**x` identity, despite having 6%
+ worst-case INL.[12][13] An approximation with poor INL will still show good
+ hashrate regulation ability, but will have a different amount of drift for a
+ given change in hashrate depending on where in the [0, 1) domain our exponent
+ (modulo 1) lies. With INL of +/- 1%, for any given difficulty (or target), a
+ block's timestamp might end up being 1% of 172800 seconds ahead of or behind
+ schedule. However, out of an abundance of caution, and because achieving
+ higher precision was easy, we chose to aim for INL that would be comparable
+ to or less than the typical drift that can be caused by one block. Out of
+ a 2-day half-life window, one block's variance comprises:
+ `600 / 172800 = 0.347%`
+ Our cubic approximation's INL performance is better than 0.013%,[14] which
+ exceeds that requirement by a comfortable margin.
+
+5. Conversion of difficulty bits (nBits) to 256-bit target representations
+
+ As there are few calculations in ASERT which involve 256-bit integers
+ and the algorithm is executed infrequently, it was considered unnecessary
+ to require more complex operations such as doing arithmetic directly on
+ the compact target representations (nBits) that are the inputs/output of
+ the difficulty algorithm.
+
+ Furthermore, 256-bit (or even bignum) arithmetic is available in existing
+ implementations and is already used within the previous DAA. The
+ performance impact is negligible.
+
+6. Choice of 16-bits of precision for fixed-point math
+
+ The nBits format consists of 8 bits of base-256 exponent, followed by a
+ 24-bit mantissa. The mantissa must have a value of at least 0x008000, which
+ means that the worst-case scenario gives the mantissa only 15 bits of
+ precision. The choice of 16-bit precision in our fixed-point math ensures
+ that overall precision is limited by this 15-bit nBits limit.
+
+7. Choice of name
+
+ The specific algorithm name 'aserti3-2d' was chosen based on:
+
+ - the 'i' refers to the integer-only arithmetic
+ - the '3' refers to the cubic approximation of the exponential
+ - the '2d' refers to the 2-day (172800 second) halflife
+
+
+## Implementation advice
+
+Implementations must not make any rounding errors during their calculations.
+Rounding must be done exactly as specified in the algorithm. In practice,
+to guarantee that, you likely need to use integer arithmetic exclusively.
+
+Implementations which use signed integers and use bit-shifting must ensure
+that the bit-shifting is arithmetic.
+
+Note 1: In C++, right shifting negative signed integers is
+ implementation-defined behavior until C++20, which defines
+ it as an arithmetic shift [5]. In practice, C/C++ compilers
+ commonly implement arithmetic bit shifting for signed
+ numbers. Implementers are advised to verify good behavior
+ through compile-time assertions or unit tests.
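+
+These Python assertions (a sketch) capture the behaviour such unit tests
+should verify; Python's `>>` on negative integers is already an arithmetic
+(sign-extending) shift:
+
+```python
+assert -1 >> 1 == -1           # sign-extends rather than filling with zeros
+assert -5 >> 1 == -3           # equals floor(-5 / 2)
+assert (-5 >> 1) == (-5 // 2)  # consistent with floored division
+```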
+
+
+## Reference implementations
+
+- C++ code for aserti3-2d (see pow.cpp): <https://reviews.bitcoinabc.org/D7174>
+- Python3 code (see contrib/testgen/validate_nbits_aserti3_2d.py): <https://gitlab.com/bitcoin-cash-node/bitcoin-cash-node/-/merge_requests/692>
+- Java code: <https://github.com/pokkst/asert-java>
+
+
+## Test vectors
+
+Test vectors suitable for validating further implementations of the aserti3-2d
+algorithm are available at:
+
+ <https://gitlab.com/bitcoin-cash-node/bchn-sw/qa-assets/-/tree/master/test_vectors/aserti3-2d>
+
+and alternatively at:
+
+ <https://download.bitcoincashnode.org/misc/data/asert/test_vectors>
+
+
+## Acknowledgements
+
+Thanks to Mark Lundeberg for granting permission to publish the ASERT paper [1],
+Jonathan Toomim for developing the initial Python and C++ implementations,
+upgrading the simulation framework [9] and evaluating the various difficulty
+algorithms.
+
+Thanks to Jacob Eliosoff, Tom Harding and Scott Roberts for evaluation work
+on the families of EMA and other algorithms considered as replacements for
+the Bitcoin Cash DAA, and thanks to the following for review and their
+valuable suggestions for improvement:
+
+- Andrea Suisani (sickpig)
+- BigBlockIfTrue
+- Fernando Pellicioni
+- imaginary_username
+- mtrycz
+- Jochen Hoenicke
+- John Nieri (emergent_reasons)
+- Tom Zander
+
+
+## References
+
+[1] "[Static difficulty adjustments, with absolutely scheduled exponentially rising targets (DA-ASERT) -- v2](http://toom.im/files/da-asert.pdf)", Mark B. Lundeberg, July 31, 2020
+
+[2] "[BCH upgrade proposal: Use ASERT as the new DAA](https://read.cash/@jtoomim/bch-upgrade-proposal-use-asert-as-the-new-daa-1d875696)", Jonathan Toomim, 8 July 2020
+
+[3] [Bitcoin Cash November 15, 2020 Upgrade Specification](2020-11-15-upgrade.md).
+
+[4] <https://en.wikipedia.org/wiki/Arithmetic_shift>
+
+[5] <https://en.cppreference.com/w/cpp/language/operator_arithmetic>
+
+[6] "[Unstable Throughput: When the Difficulty Algorithm Breaks](https://arxiv.org/pdf/2006.03044.pdf)", Sam M. Werner, Dragos I. Ilie, Iain Stewart, William J. Knottenbelt, June 2020
+
+[7] "[Different kinds of integer division](https://harry.garrood.me/blog/integer-division)", Harry Garrood, blog, 2018
+
+[8] [Error in a cubic approximation of 2^x for 0 <= x < 1](https://twitter.com/MarkLundeberg/status/1191831127306031104)
+
+[9] Jonathan Toomim adaptation of kyuupichan's difficulty algorithm simulator: <https://github.com/jtoomim/difficulty/tree/comparator>
+
+[10] "[The Euclidean definition of the functions div and mod](dl.acm.org/doi/10.1145/128861.128862)", Raymond T. Boute, 1992, ACM Transactions on Programming Languages and Systems (TOPLAS). 14. 127-144. 10.1145/128861.128862
+
+[11] <http://toom.im/bch/aserti3_step_size.html>
+
+[12] [f(x) = (1 + x)/2^x for 0<x<1](https://www.wolframalpha.com/input/?i=f%28x%29+%3D+%281+%2B+x%29%2F2%5Ex+for+0%3Cx%3C1), WolframAlpha.
+
+[13] <https://github.com/zawy12/difficulty-algorithms/issues/62#issuecomment-647060200>
+
+[14] <http://toom.im/bch/aserti3_approx_error.html>
+
+[15] <https://github.com/zawy12/difficulty-algorithms/issues/62#issuecomment-646187957>
+
+
+## License
+
+This specification is dual-licensed under the Creative Commons CC0 1.0 Universal and
+GNU All-Permissive licenses.
diff --git a/doc/standards/2020-11-15-upgrade.md b/doc/standards/2020-11-15-upgrade.md
new file mode 100644
index 000000000..5a377859d
--- /dev/null
+++ b/doc/standards/2020-11-15-upgrade.md
@@ -0,0 +1,67 @@
+---
+layout: specification
+title: 2020-NOV-15 Network Upgrade Specification
+date: 2020-08-15
+category: spec
+activation: 1605441600
+version: 0.1
+---
+
+## Summary
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1605441600 (Nov 15th, 2020, 12:00PM UTC),
+Bitcoin Cash will execute an upgrade of the network consensus rules according to this specification.
+Starting from the next block these consensus rules changes will take effect:
+
+* Bitcoin Cash's Difficulty Adjustment Algorithm (DAA) is replaced with a new system, referred to as aserti3-2d.
+* The addition of a new coinbase rule.
+
+The following are not consensus changes, but are recommended policy changes for Bitcoin Cash implementations:
+
+* Automatic replay protection for future upgrade.
+
+## Difficulty Adjustment Algorithm
+
+Bitcoin Cash's Difficulty Adjustment Algorithm (DAA) is replaced with a new algorithm called [ASERT](http://toom.im/files/da-asert.pdf).
+
+The specific implementation is called aserti3-2d. Details can be found in the [full specification: ASERT](https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/2020-11-15-asert.md).
+
+## Coinbase Rule
+
+The purpose of the new coinbase rule is to provide funding to development projects working on common Bitcoin Cash infrastructure.
+
+The coinbase rule enforces that at least 8% of the block reward must be spent as a single output to the following Bitcoin Cash address:
+`bitcoincash:pqnqv9lt7e5vjyp0w88zf2af0l92l8rxdgnlxww9j9`.
+
+The amount of the output must be equal to or greater than the integer `required`, calculated as follows using integer math:
+```
+required = (8 * blockReward) / 100
+```
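+
+A worked example (a sketch; amounts in satoshis, and the reward figure is an
+assumed round value for illustration):
+
+```python
+block_reward = 625_000_000            # e.g. a 6.25 BCH reward, in satoshis
+required = (8 * block_reward) // 100  # integer (floor) division
+assert required == 50_000_000         # at least 0.5 BCH to the fund output
+```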
+
+## Automatic Replay Protection
+
+The purpose of Automatic Replay Protection is to serve as a full node version-deprecation mechanism. It is intended to cause
+full validating nodes which do not upgrade to automatically separate themselves from the main network after the next
+upgrade on 15 May 2021. Nodes which implement the next upgrade will remove this automatic replay protection, and thus all regular
+wallets can continue using the default ForkID with no change to follow the main upgraded chain.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is less than UNIX timestamp 1621080000 (May 2021 upgrade)
+Bitcoin Cash full nodes MUST enforce the following rule:
+
+ * `forkid` [2] to be equal to 0.
+
+When the median time past [1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1621080000
+(May 2021 upgrade) Bitcoin Cash full nodes implementing the Nov 2020 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid` [2] to be equal to `0xFFXXXX`, where `XXXX` is some arbitrary hex value.
+ ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software. Wallets that follow the upgrade
+should not have to change anything.
+
+## References
+
+[1] Median Time Past is described in [bitcoin.it wiki](https://en.bitcoin.it/wiki/Block_timestamp).
+It is guaranteed by consensus rules to be monotonically increasing.
+
+[2] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/block.md b/doc/standards/block.md
new file mode 100644
index 000000000..48cf978ad
--- /dev/null
+++ b/doc/standards/block.md
@@ -0,0 +1,144 @@
+---
+layout: specification
+title: Block Spec for Bitcoin Cash ⚠️ out of date ⚠️
+category: spec
+date: 2017-08-26
+activation: 1515888000
+version: 1.0
+---
+
+This section of the Bitcoin Cash (BCH) specification ("spec") documents the **block data structure** for implementing a compatible BCH client, including the block header, block serialization, and coinbase transaction formats.
+
+This spec is based on the Bitcoin ABC implementation of the [Bitcoin Cash](https://www.bitcoincash.org/) protocol.
+
+Developer resources:
+- [Bitcoin ABC source code](https://github.com/Bitcoin-ABC/bitcoin-abc)
+- [Bitcoin ABC developer documentation](https://www.bitcoinabc.org/doc/dev/)
+
+# Block
+A **block** is one of the two base primitives in the BCH system, the other being a **transaction**. Primitive in this context means it is one of the data structures for which the BCH software provides built-in support.
+
+Nodes collect new transactions into a **block**, hash them into a hash tree (**merkle root hash**), and scan through **nonce** values to make the block's hash satisfy proof-of-work requirements.
+
+When a miner solves the proof-of-work, it broadcasts the block to network nodes and if the block is valid it is added to the block chain. The first transaction in the block is the **coinbase transaction** that creates a new coin owned by the creator of the block. An algorithm ensures that a new block is generated every 10 minutes (600 seconds) on average.
+
+The block validation rules described here ensure that BCH nodes stay in consensus with other nodes. There are several rules that must be respected for a block to be valid. A node is responsible for processing, validating, and relaying the block and its transactions.
+
+A node is distinct on the network from miners and wallets. A BCH node is a piece of software that connects to other nodes in a network and communicates via peer-to-peer messages. Nodes establish connections with peers via the `version`/`verack` handshake and perform full validation checks, including:
+
+1. Connecting to the network and peers.
+2. Acquiring block headers.
+3. Validating all blocks.
+4. Validating all transactions.
+
+Block source code: https://github.com/Bitcoin-ABC/bitcoin-abc/blob/master/src/primitives/block.h.
+
+## Block Header
+Block headers are serialized in the 80-byte format comprising six fields: [version](#block-version), [previous block hash](#previous-block-hash), [merkle root hash](#merkle-root-hash), [timestamp](#block-timestamp), [difficulty target](#difficulty-target), and [nonce](#nonce).
+
+The block header is hashed as part of the proof-of-work algorithm, making the serialized header format part of the consensus rules. The hash of the block header is the unique signature of the block. The block header hash is included in the next block that is mined. The block header includes a pointer to the previous block that links them in the blockchain.
+
+The block header requires the following six fields. Note that the hashes are in internal byte order; all other values are in little-endian order.
+
+Field | Size (bytes) | Data type | Description
+----------------|---------------|-----------|------------
+`nVersion` | 4 | int32_t | The block version number indicates which set of block validation rules to follow.
+`hashPrevBlock` | 32 | uint256 | The SHA256(SHA256(Block_Header)) message digest of the previous block’s header.
+`hashMerkleRoot`| 32 | uint256 | The message digest of the Merkle root.
+`nTime` | 4 | uint32_t | Current timestamp in seconds since 1970-01-01T00:00 UTC (Unix time).
+`nBits` | 4 | uint32_t | Difficulty target for the proof-of-work for this block.
+`nNonce` | 4 | uint32_t | 32-bit number (starts at 0) used to generate this block (the "nonce").
+
+### Block Version
+The block version number is a signed 4 byte integer (`int32_t`) that indicates which set of block validation rules to follow. BCH version >= 4 is valid.
+
+### Previous Block Hash
+The SHA256(SHA256(Block_Header)) message digest (hash) of the previous block’s header in internal byte order. This ensures no previous block can be changed without also changing this block’s header.
+
+### Merkle Root Hash
+The Merkle tree is a data structure that provides a record of all transactions in the block. Each transaction in the block is a leaf of the Merkle tree. The Merkle root is derived from the hashes of all transactions included in this block, which ensures that no transaction can be modified without modifying the block header.
+
+The Merkle root is constructed from the list of transaction IDs in the order the transactions appear in the block.
+
+* The coinbase transaction TXID is always placed first.
+
+* Any input within this block can spend an output which also appears in this block (assuming the spend is otherwise valid). However, the TXID corresponding to the output must be placed at some point before the TXID corresponding to the input. This ensures that any program parsing block chain transactions linearly will encounter each output before it is used as an input.
+
+If a block only has a coinbase transaction, the coinbase TXID is used as the Merkle root hash.
+
+If a block only has a coinbase transaction and one other transaction, the TXIDs of those two transactions are placed in order, concatenated as 64 raw bytes, and then SHA256(SHA256()) hashed together to form the Merkle root.
+
+If a block has three or more transactions, intermediate Merkle tree rows are formed. The TXIDs are placed in order and paired, starting with the coinbase transaction's TXID. Each pair is concatenated together as 64 raw bytes and SHA256(SHA256()) hashed to form a second row of hashes. If there are an odd (non-even) number of TXIDs, the last TXID is concatenated with a copy of itself and hashed. If there are more than two hashes in the second row, the process is repeated to create a third row (and, if necessary, repeated further to create additional rows). Once a row is obtained with only two hashes, those hashes are concatenated and hashed to produce the Merkle root.
+
+TXIDs and intermediate hashes are always in internal byte order when they're concatenated, and the resulting Merkle root is also in internal byte order when it's placed in the block header.
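+
+A sketch of this construction (TXIDs passed as 32-byte hashes in internal
+byte order, coinbase TXID first):
+
+```python
+import hashlib
+
+def merkle_root(txids):
+    row = list(txids)
+    while len(row) > 1:
+        if len(row) % 2 == 1:
+            row.append(row[-1])  # odd count: pair the last hash with itself
+        row = [hashlib.sha256(hashlib.sha256(a + b).digest()).digest()
+               for a, b in zip(row[0::2], row[1::2])]
+    return row[0]  # a lone coinbase TXID is used as the root directly
+```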
+
+Note that the Merkle root makes it possible in the future to securely verify that a transaction has been accepted by the network using just the block header (which includes the Merkle tree), eliminating the current requirement to download the entire blockchain.
+
+Refer to the source code for more details on security issues: https://github.com/Bitcoin-ABC/bitcoin-abc/blob/master/src/consensus/merkle.cpp.
+
+### Block Timestamp
+The block timestamp is Unix epoch time when the miner started hashing the header according to the miner's clock. The block timestamp must be greater than the median time of the previous 11 blocks. Note that when validating the first 11 blocks of the chain, you will need to know how to handle arrays of less than length 11 to get a median. A node will not accept a block with a timestamp more than 2 hours ahead of its view of network-adjusted time.
+
+### Difficulty Target
+The difficulty target is a 256-bit unsigned integer which a header hash must be less than or equal to for that header to be a valid part of the block chain. The header field *nBits* provides only 32 bits of space, so the target number uses a less precise format called "compact" which works like a base-256 version of scientific notation. As a base-256 number, nBits can be parsed as bytes the same way you might parse a decimal number in base-10 scientific notation.
+
+Although the target threshold should be an unsigned integer, the original nBits implementation inherits properties from a signed data class, allowing the target threshold to be negative if the high bit of the significand is set.
+
+* When parsing nBits, the system converts a negative target threshold into a target of zero, which the header hash can equal (in
+ theory, at least).
+
+* When creating a value for nBits, the system checks to see if it will produce an nBits which will be interpreted as negative; if so, it divides the significand by 256 and increases the exponent by 1 to produce the same number with a different encoding.
+
+Difficulty is a measure of how difficult it is to find a hash below a given target. The BCH network has a global block difficulty. Valid blocks must have a hash below the difficulty target calculated from the `nBits` value. The current difficulty target is available here: https://blockexplorer.com/api/status?q=getDifficulty.
+
+### Nonce
+To be valid, a block must include a **nonce** value that is the solution to the mining process. This proof-of-work is verified by other BCH nodes each time they receive a block.
+
+**NOTE:** The original purpose of the nonce was to manipulate it to find a solution to the mining process. However, because mining devices now have hashrates in the terahash range, the `nonce` field is too small. In practice, most block headers do not include a solution to the mining process in the `nonce`. Instead, miners have to try many different Merkle root hashes, which is typically done by changing the coinbase transaction. A `nonce` value is nonetheless required.
+
+The `nonce` is a 32-bit (4-byte) field whose value is set arbitrarily by miners to modify the header hash and produce a hash less than or equal to the target threshold. If all 32-bit values are exhausted without finding a solution, the time can be updated or the coinbase transaction can be changed and the Merkle root updated.
+
+Any change to the nonce will make the block header hash completely different. Since it is virtually impossible to predict which combination of bits will result in the right hash, many different nonce values are tried, and the hash is recomputed for each value until a hash less than the difficulty target is found. As this iterative calculation requires time and resources, the presentation of a block with the correct nonce value constitutes proof-of-work.
+
+It is important to note that the proof-of-work can be verified by computing one hash with the proper content, and is therefore very cheap. The fact that the proof is cheap to verify is as important as the fact that it is expensive to compute.
+
+## Coinbase Transaction
+The first transaction in the body of each block is a special transaction called the **coinbase transaction** which is used to pay miners of the block. The coinbase transaction is required, and must collect and spend any transaction fees paid by transactions included in the block.
+
+A valid block is entitled to receive a block subsidy of newly created Bitcoin Cash value, which must be spent in the coinbase transaction. Together, the transaction fees and block subsidy are called the **block reward**. A coinbase transaction is invalid if it tries to spend more value than is available from the block reward. The subsidy plus fees is the maximum coinbase payout, but note that it is valid for the coinbase to pay less.
+
+The coinbase transaction must have exactly one input, whose previous outpoint is null (see the table below). The field used to provide the signature can contain arbitrary data up to 100 bytes. The coinbase script must start with the block height to ensure no two coinbase transactions have the same transaction id (TXID).
+
+The coinbase transaction has the following format:
+
+| Bytes | Name | Data Type | Description
+|----------|--------------------|----------------------|--------------
+| 32 | hash (null) | char[32] | A 32-byte null, as a coinbase has no previous outpoint.
+| 4 | index (UINT32_MAX) | uint32_t | 0xffffffff, as a coinbase has no previous outpoint.
+| *Varies* | script bytes | compactSize uint | The number of bytes in the coinbase script, up to a maximum of 100 bytes.
+| *Varies* (4) | height | script | The block height of this block. Required parameter. Uses the script language: starts with a data-pushing opcode that indicates how many bytes to push to the stack followed by the block height as a little-endian unsigned integer. This script must be as short as possible, otherwise it may be rejected. The data-pushing opcode is 0x03 and the total size is four bytes.
+| *Varies* | coinbase script | *None* | The coinbase field and input parameter: Arbitrary data not exceeding 100 bytes minus the (4) height bytes. Miners commonly place an extra nonce in this field to update the block header Merkle root during hashing.
+| 4 | sequence | uint32_t | Sequence number.
+
+Although the coinbase script is arbitrary data, if it includes the bytes used by any signature-checking operations such as `OP_CHECKSIG`,
+those signature checks will be counted as signature operations (sigops) towards the block's sigop limit. To avoid this, you can prefix all data with the appropriate push operation. See [Transaction format](#transaction) for details on opcodes.
+
+## Block Serialization
+Blocks must be serialized in binary format for transport on the network. Under current BCH consensus rules, a BCH block is valid if its serialized size is not more than 32MB (32,000,000 bytes). All fields described below count towards the serialized size limit.
+
+| Bytes | Name | Data type | Description
+|-----------|---------------|-------------------|------------
+| 80 | block header | block_header | The block header in the proper format. See [Block Header](#block-header).
+| Varies | txn_count | compactSize uint | Total number of transactions in this block, including the coinbase transaction.
+| Varies | txns | raw transaction | Each transaction in this block, one after another, in raw transaction format. Transactions must appear in the data stream in the same order their TXIDs appeared in the first row of the [Merkle tree](#merkle-root-hash).
+
+The serialized (raw) form of each block header is hashed as part of the proof-of-work, making the serialized block header part of the BCH consensus rules. As part of the mining process, the block header is hashed repeatedly to create proof-of-work.
+
+BCH uses SHA256(SHA256(Block_Header)) to hash the block header. You must ensure that the block header is in the proper byte-order before hashing. The following serialization rules apply to the block header:
+
+- Both hash fields use double-hashing (`SHA256(SHA256(DATA))`) and are serialized in internal byte order, which is the reverse of the order in which hash digests are conventionally displayed as strings.
+- The values for all other fields in the block header are serialized in little-endian order. Note that when displayed via a block browser or query, the ordering is big-endian.
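+
+A sketch of header serialization and hashing per the rules above (the hash
+fields are passed as 32-byte values already in internal byte order):
+
+```python
+import hashlib
+import struct
+
+def serialize_header(version, prev_hash, merkle_root, ntime, nbits, nonce):
+    # '<' = little-endian; int32 version, two 32-byte hashes, then three
+    # uint32 fields: 4 + 32 + 32 + 4 + 4 + 4 = 80 bytes.
+    return struct.pack("<i32s32sIII", version, prev_hash, merkle_root,
+                       ntime, nbits, nonce)
+
+def header_hash(header80):
+    return hashlib.sha256(hashlib.sha256(header80).digest()).digest()
+```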
diff --git a/doc/standards/may-2018-hardfork.md b/doc/standards/may-2018-hardfork.md
new file mode 100644
index 000000000..eddb05eab
--- /dev/null
+++ b/doc/standards/may-2018-hardfork.md
@@ -0,0 +1,38 @@
+---
+layout: specification
+title: May 2018 Hardfork Specification
+category: spec
+date: 2018-04-09
+activation: 1526400000
+version: 1.1
+---
+
+## Summary
+
+When the median time past[1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1526400000 Bitcoin Cash will execute a hardfork according to this specification. Starting from the next block these consensus rules changes will take effect:
+
+* Blocksize increase to 32,000,000 bytes
+* Re-enabling of several opcodes
+
+The following are not consensus changes, but are recommended changes for Bitcoin Cash implementations:
+
+* Automatic replay protection for future hardforks
+* Increase OP_RETURN relay size to 223 total bytes
+
+## Blocksize increase
+
+The blocksize hard capacity limit will be increased to 32MB (32000000 bytes).
+
+## OpCodes
+
+Several opcodes will be re-enabled per [may-2018-reenabled-opcodes](may-2018-reenabled-opcodes.md)
+
+## Automatic Replay Protection
+
+When the median time past[1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1542300000 (November 2018 hardfork) Bitcoin Cash full nodes implementing the May 2018 consensus rules SHOULD enforce the following change:
+
+ * Update `forkid`[2] to be equal to 0xFF0001. ForkIDs beginning with 0xFF will be reserved for future protocol upgrades.
+
+This particular consensus rule MUST NOT be implemented by Bitcoin Cash wallet software.
+
+[1] The median time past (MTP-11) of a block is the median timestamp of the last 11 blocks prior to, and including, that block.
+
+[2] The `forkId` is defined as per the [replay protected sighash](replay-protected-sighash.md) specification.
diff --git a/doc/standards/may-2018-reenabled-opcodes.md b/doc/standards/may-2018-reenabled-opcodes.md
new file mode 100644
index 000000000..a13e39aba
--- /dev/null
+++ b/doc/standards/may-2018-reenabled-opcodes.md
@@ -0,0 +1,425 @@
+---
+layout: specification
+title: Restore disabled script opcodes, May 2018
+category: spec
+date: 2018-04-05
+activation: 1526400000
+version: 0.4
+updated: 2018-05-23
+---
+
+## Introduction
+
+In 2010 and 2011 the discovery of serious bugs prompted the deactivation of many opcodes in the Bitcoin script language.
+It is our intention to restore the functionality that some of these opcodes provided in Bitcoin Cash. Rather than simply
+re-enable the opcodes, the functionality that they provide has been re-examined and in some cases the opcodes have been
+re-designed or new opcodes have been added to address specific issues.
+
+This document contains the specifications for the opcodes that are to be added in the May 2018 protocol upgrade. We
+anticipate that additional opcodes will be proposed for the November 2018, or later, protocol upgrades.
+
+The opcodes that are to be added are:
+
+|Word |OpCode |Hex |Input |Output | Description |
+|-----------|-------|----|--------------|--------|------------------------------------------------------------------|
+|OP_CAT |126 |0x7e|x1 x2 |out |Concatenates two byte sequences |
+|OP_SPLIT |127 |0x7f|x n |x1 x2 |Split byte sequence *x* at position *n* |
+|OP_AND |132 |0x84|x1 x2 |out |Boolean *AND* between each bit of the inputs |
+|OP_OR |133 |0x85|x1 x2 |out |Boolean *OR* between each bit of the inputs |
+|OP_XOR |134 |0x86|x1 x2 |out |Boolean *EXCLUSIVE OR* between each bit of the inputs |
+|OP_DIV |150 |0x96|a b |out |*a* is divided by *b* |
+|OP_MOD |151 |0x97|a b |out |return the remainder after *a* is divided by *b* |
+|OP_NUM2BIN |128 |0x80|a b |out |convert numeric value *a* into byte sequence of length *b* |
+|OP_BIN2NUM |129 |0x81|x |out |convert byte sequence *x* into a numeric value |
+
+Splice operations: `OP_CAT`, `OP_SPLIT`**
+
+Bitwise logic: `OP_AND`, `OP_OR`, `OP_XOR`
+
+Arithmetic: `OP_DIV`, `OP_MOD`
+
+New operations:
+* `x OP_BIN2NUM -> n`, convert a byte sequence `x` into a numeric value
+* `n m OP_NUM2BIN -> out`, convert a numeric value `n` into a byte sequence of length `m`
+
+Further discussion of the purpose of these new operations can be found below under *bitwise operations*.
+
+** A new operation, `OP_SPLIT`, has been designed as a replacement for `OP_SUBSTR`, `OP_LEFT`and `OP_RIGHT`.
+The original operations can be implemented with varying combinations of `OP_SPLIT`, `OP_SWAP` and `OP_DROP`.
+
+
+## <a name="data-types"></a>Script data types
+
+It should be noted that in script operation data values on the stack are interpreted as either byte sequences
+or numeric values. **All data on the stack is interpreted as a byte sequence unless specifically stated as being interpreted
+as a numeric value.**
+
+For accuracy in this specification, a byte sequence is presented as {0x01, 0x02, 0x03}. This sequence is three bytes long; it begins
+with a byte of value 1 and ends with a byte of value 3.
+
+The numeric value type has specific limitations:
+1. The used encoding is little endian with an explicit sign bit (the highest bit of the last byte).
+2. They cannot exceed 4 bytes in length.
+3. They must be encoded using the shortest possible byte length (no zero padding)
+ 1. There is one exception to rule 3: if there is more than one byte and the most significant bit of the
+ second-most-significant-byte is set it would conflict with the sign bit. In this case a single 0x00 or 0x80 byte is allowed
+ to the left.
+4. Zero is encoded as a zero length byte sequence. Single byte positive or negative zero (0x00 or 0x80) are not allowed.
+
+The new opcode `x OP_BIN2NUM -> out` can be used to convert a byte sequence into a numeric value where required.
+
+The new opcode `x n OP_NUM2BIN` can be used to convert a numeric value into a zero padded byte sequence of length `n`
+whilst preserving the sign bit.
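+
+For illustration, a Python sketch of these encoding rules (the 4-byte limit
+on numeric values is not enforced here):
+
+```python
+def num_encode(n):
+    """Minimal little-endian sign-magnitude encoding; zero is empty."""
+    if n == 0:
+        return b""
+    neg, m, out = n < 0, abs(n), bytearray()
+    while m:
+        out.append(m & 0xFF)
+        m >>= 8
+    if out[-1] & 0x80:
+        out.append(0x80 if neg else 0x00)  # extra byte carries the sign bit
+    elif neg:
+        out[-1] |= 0x80                    # set the sign bit in place
+    return bytes(out)
+
+def num_decode(b):
+    if not b:
+        return 0
+    n = int.from_bytes(b, "little")
+    sign_bit = 1 << (8 * len(b) - 1)
+    return -(n ^ sign_bit) if n & sign_bit else n
+
+assert num_encode(-5) == b"\x85" and num_decode(b"\x85") == -5
+assert num_encode(128) == b"\x80\x00"  # padding byte avoids the sign clash
+```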
+
+## Definitions
+
+* *Stack memory use*. This is the sum of the size of the elements on the stack. It gives an indication of impact on
+ memory use by the interpreter.
+* *Operand order*. In keeping with convention where multiple operands are specified the top most stack item is the
+ last operand. e.g. `x1 x2 OP_CAT` --> `x2` is the top stack item and `x1` is the next from the top.
+* *empty byte sequence*. Throughout this document `OP_0` is used as a convenient representation of an empty byte
+ sequence. Whilst it is a push data op code, its effect is to push an empty byte sequence on to the stack.
+
+## Specification
+
+Global conditions apply to all operations. These conditions must be checked by the implementation when
+it is possible that they will occur:
+* for all e : elements on the stack, `0 <= len(e) <= MAX_SCRIPT_ELEMENT_SIZE`
+* for each operator, the required number of operands are present on the stack when the operator is executed
+
+These unit tests should be included for every operation:
+1. executing the operation with an input element of length greater than `MAX_SCRIPT_ELEMENT_SIZE` will fail
+2. executing the operation with an insufficient number of operands on the stack causes a failure
+
+
+Operand consumption:
+
+In all cases where not explicitly stated otherwise the operand stack elements are consumed by the operation and replaced with the output.
+
+## Splice operations
+
+### OP_CAT
+
+ Opcode (decimal): 126
+ Opcode (hex): 0x7e
+
+Concatenates two operands.
+
+ x1 x2 OP_CAT -> out
+
+Examples:
+* `{0x11} {0x22, 0x33} OP_CAT -> {0x11, 0x22, 0x33}`
+
+The operator must fail if `len(out) > MAX_SCRIPT_ELEMENT_SIZE`. The operation cannot output elements that violate the constraint on the element size.
+
+Note that the concatenation of a zero length operand is valid.
+
+Impact of successful execution:
+* stack memory use is constant
+* number of elements on stack is reduced by one
+
+The limit on the length of the output prevents the memory exhaustion attack and results in the operation having less
+impact on stack size than existing OP_DUP operators.
+
+Unit tests:
+1. `maxlen_x y OP_CAT -> failure`. Concatenating any operand except an empty vector, including a single byte value
+ (e.g. `OP_1`), onto a maximum sized array causes failure
+2. `large_x large_y OP_CAT -> failure`. Concatenating two operands, where the total length is greater than
+ `MAX_SCRIPT_ELEMENT_SIZE`, causes failure
+3. `OP_0 OP_0 OP_CAT -> OP_0`. Concatenating two empty arrays results in an empty array
+4. `x OP_0 OP_CAT -> x`. Concatenating an empty array onto any operand results in the operand, including when
+ `len(x) = MAX_SCRIPT_ELEMENT_SIZE`
+5. `OP_0 x OP_CAT -> x`. Concatenating any operand onto an empty array results in the operand, including when
+ `len(x) = MAX_SCRIPT_ELEMENT_SIZE`
+6. `x y OP_CAT -> concat(x,y)`. Concatenating two operands generates the correct result
+
+### OP_SPLIT
+
+*`OP_SPLIT` replaces `OP_SUBSTR` and uses its opcode.*
+
+ Opcode (decimal): 127
+ Opcode (hex): 0x7f
+
+
+Split the operand at the given position. This operation is the exact inverse of `OP_CAT`.
+
+ x n OP_SPLIT -> x1 x2
+
+ where n is interpreted as a numeric value
+
+Examples:
+* `{0x00, 0x11, 0x22} 0 OP_SPLIT -> OP_0 {0x00, 0x11, 0x22}`
+* `{0x00, 0x11, 0x22} 1 OP_SPLIT -> {0x00} {0x11, 0x22}`
+* `{0x00, 0x11, 0x22} 2 OP_SPLIT -> {0x00, 0x11} {0x22}`
+* `{0x00, 0x11, 0x22} 3 OP_SPLIT -> {0x00, 0x11, 0x22} OP_0`
+
+Notes:
+* this operator has been introduced as a replacement for the previous `OP_SUBSTR`, `OP_LEFT` and `OP_RIGHT`. All three operators can be
+simulated with varying combinations of `OP_SPLIT`, `OP_SWAP` and `OP_DROP`; two examples are sketched after this list. This is in keeping with the minimalist philosophy where a single
+primitive can be used to simulate multiple more complex operations.
+* `x` is split at position `n`, where `n` is the number of bytes from the beginning
+* `x1` will be the first `n` bytes of `x` and `x2` will be the remaining bytes
+* if `n == 0`, then `x1` is the empty array and `x2 == x`
+* if `n == len(x)` then `x1 == x` and `x2` is the empty array.
+* if `n > len(x)`, then the operator must fail.
+* `x n OP_SPLIT OP_CAT -> x`, for all `x` and for all `0 <= n <= len(x)`
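+
+Two such simulations (sketches, based on the conventional semantics of the
+replaced operators):
+* `x n OP_SPLIT OP_DROP -> x1`, leaving the first `n` bytes of `x` (the effect of the old `OP_LEFT`)
+* `x n OP_SPLIT OP_SWAP OP_DROP -> x2`, leaving the bytes of `x` from position `n` onwards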
+
+The operator must fail if:
+* `!isnum(n)`. Fail if `n` is not a numeric value.
+* `n < 0`. Fail if `n` is negative.
+* `n > len(x)`. Fail if `n` is too high.
+
+Impact of successful execution:
+* stack memory use is constant (slight reduction by `len(n)`)
+* number of elements on stack is constant
+
+Unit tests:
+* `OP_0 0 OP_SPLIT -> OP_0 OP_0`. Execution of OP_SPLIT on empty array results in two empty arrays.
+* `x 0 OP_SPLIT -> OP_0 x`
+* `x len(x) OP_SPLIT -> x OP_0`
+* `x (len(x) + 1) OP_SPLIT -> FAIL`
+* include successful unit tests
+
+## Bitwise logic
+
+The bitwise logic operators expect 'byte sequence' operands. The operands must be the same length.
+* In the case of 'byte sequence' operands `OP_CAT` can be used to pad a shorter byte sequence to an appropriate length.
+* In the case of 'byte sequence' operands where the length of operands is not known until runtime a sequence of 0x00 bytes
+ (for use with `OP_CAT`) can be produced using `OP_0 n OP_NUM2BIN`
+* In the case of numeric value operands `x n OP_NUM2BIN` can be used to pad a numeric value to length `n` whilst preserving
+ the sign bit.
+
+### OP_AND
+
+ Opcode (decimal): 132
+ Opcode (hex): 0x84
+
+Boolean *and* between each bit in the operands.
+
+ x1 x2 OP_AND -> out
+
+Notes:
+* where `len(x1) == 0` and `len(x2) == 0` the output will be an empty array.
+
+The operator must fail if:
+1. `len(x1) != len(x2)`. The two operands must be the same size.
+
+Impact of successful execution:
+* stack memory use reduced by `len(x1)`
+* number of elements on stack is reduced by one
+
+Unit tests:
+
+1. `x1 x2 OP_AND -> failure`, where `len(x1) != len(x2)`. The two operands must be the same size.
+2. `x1 x2 OP_AND -> x1 & x2`. Check valid results.
+
+### OP_OR
+
+ Opcode (decimal): 133
+ Opcode (hex): 0x85
+
+Boolean *or* between each bit in the operands.
+
+ x1 x2 OP_OR -> out
+
+The operator must fail if:
+1. `len(x1) != len(x2)`. The two operands must be the same size.
+
+Impact of successful execution:
+* stack memory use reduced by `len(x1)`
+* number of elements on stack is reduced by one
+
+Unit tests:
+1. `x1 x2 OP_OR -> failure`, where `len(x1) != len(x2)`. The two operands must be the same size.
+2. `x1 x2 OP_OR -> x1 | x2`. Check valid results.
+
+### OP_XOR
+
+ Opcode (decimal): 134
+ Opcode (hex): 0x86
+
+Boolean *xor* between each bit in the operands.
+
+ x1 x2 OP_XOR -> out
+
+The operator must fail if:
+1. `len(x1) != len(x2)`. The two operands must be the same size.
+
+Impact of successful execution:
+* stack memory use reduced by `len(x1)`
+* number of elements on stack is reduced by one
+
+Unit tests:
+1. `x1 x2 OP_XOR -> failure`, where `len(x1) != len(x2)`. The two operands must be the same size.
+2. `x1 x2 OP_XOR -> x1 xor x2`. Check valid results.
+
+## Arithmetic
+
+#### Note about canonical form and truncated division
+
+Operands for all arithmetic operations are assumed to be numeric values and must be in canonical form.
+See [data types](#data-types) for more information.
+
+**Truncated division**
+
+Note that when considering integer division and modulo operations with negative operands, the rules applied in the C language and most
+languages (with Python being a notable exception) differ from the strict mathematical definition. Script follows the C language set of
+rules (a Python sketch follows the list below). Namely:
+1. Non-integer quotients are rounded towards zero
+2. The equation `(a/b)*b + a%b == a` is satisfied by the results
+3. From the above equation it follows that: `a%b == a - (a/b)*b`
+4. In practice if `a` is negative for the modulo operator the result will be negative or zero.
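+
+A Python sketch of these rules (Python's native `//` and `%` are floor-based
+and differ for negative operands, so truncation is emulated explicitly):
+
+```python
+def script_div(a, b):
+    q = abs(a) // abs(b)             # magnitude, rounded towards zero
+    return -q if (a < 0) != (b < 0) else q
+
+def script_mod(a, b):
+    return a - script_div(a, b) * b  # satisfies (a/b)*b + a%b == a
+
+assert script_div(-27, 7) == -3 and script_mod(-27, 7) == -6
+```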
+
+
+### OP_DIV
+
+ Opcode (decimal): 150
+ Opcode (hex): 0x96
+
+Return the integer quotient of `a` and `b`. If the result would be a non-integer it is rounded *towards* zero.
+
+ a b OP_DIV -> out
+
+ where a and b are interpreted as numeric values
+
+The operator must fail if:
+1. `!isnum(a) || !isnum(b)`. Fail if either operand is not a numeric value.
+1. `b == 0`. Fail if `b` is equal to any type of zero.
+
+Impact of successful execution:
+* stack memory use reduced
+* number of elements on stack is reduced by one
+
+Unit tests:
+1. `a b OP_DIV -> failure` where `!isnum(a)` or `!isnum(b)`. Both operands must be numeric values
+2. `a 0 OP_DIV -> failure`. Division by positive zero (all sizes), negative zero (all sizes), `OP_0`
+3. `27 7 OP_DIV -> 3`, `27 -7 OP_DIV -> -3`, `-27 7 OP_DIV -> -3`, `-27 -7 OP_DIV -> 3`. Check negative operands.
+ *Pay attention to sign*.
+4. check valid results for operands of different lengths `0..4`
+
+### OP_MOD
+
+ Opcode (decimal): 151
+ Opcode (hex): 0x97
+
+Returns the remainder after dividing `a` by `b`. The output will be represented using the least number of bytes required.
+
+ a b OP_MOD -> out
+
+ where a and b are interpreted as numeric values
+
+The operator must fail if:
+1. `!isnum(a) || !isnum(b)`. Fail if either operand is not a numeric value.
+1. `b == 0`. Fail if `b` is equal to any type of zero.
+
+Impact of successful execution:
+* stack memory use reduced (one element removed)
+* number of elements on stack is reduced by one
+
+Unit tests:
+1. `a b OP_MOD -> failure` where `!isnum(a)` or `!isnum(b)`. Both operands must be numeric values.
+2. `a 0 OP_MOD -> failure`. Division by positive zero (all sizes), negative zero (all sizes), `OP_0`
+3. `27 7 OP_MOD -> 6`, `27 -7 OP_MOD -> 6`, `-27 7 OP_MOD -> -6`, `-27 -7 OP_MOD -> -6`. Check negative operands.
+ *Pay attention to sign*.
+4. check valid results for operands of different lengths `0..4` and returning result zero
+
+## New operations
+
+### OP_NUM2BIN
+
+*`OP_NUM2BIN` replaces `OP_LEFT` and uses its opcode*
+
+ Opcode (decimal): 128
+ Opcode (hex): 0x80
+
+Convert the numeric value into a byte sequence of a certain size, taking account of the sign bit.
+The byte sequence produced uses the little-endian encoding.
+
+ a b OP_NUM2BIN -> x
+
+where `a` and `b` are interpreted as numeric values. `a` is the value to be converted to a byte sequence,
+it can be up to `MAX_SCRIPT_ELEMENT_SIZE` long and does not need to be minimally encoded.
+`b` is the desired size of the result, it must be minimally encoded and <= 4 bytes long. It must be possible for the
+value `a` to be encoded in a byte sequence of length `b` without loss of data.
+
+
+See also `OP_BIN2NUM`.
+
+Examples:
+* `2 4 OP_NUM2BIN -> {0x02, 0x00, 0x00, 0x00}`
+* `-5 4 OP_NUM2BIN -> {0x05, 0x00, 0x00, 0x80}`
+
+The operator must fail if:
+1. `b` is not a minimally encoded numeric value.
+2. `b < len(minimal_encoding(a))`. `a` must be able to fit into `b` bytes.
+3. `b > MAX_SCRIPT_ELEMENT_SIZE`. The result would be too large.
+
+Impact of successful execution:
+* stack memory use will be increased by `b - len(a) - len(b)`, maximum increase is when `b = MAX_SCRIPT_ELEMENT_SIZE`
+* number of elements on stack is reduced by one
+
+Unit tests:
+1. `a b OP_NUM2BIN -> failure` where `!isnum(b)`. `b` must be a minimally encoded numeric value.
+2. `256 1 OP_NUM2BIN -> failure`. Trying to produce a byte sequence which is smaller than the minimum size needed to
+ contain the numeric value.
+3. `1 (MAX_SCRIPT_ELEMENT_SIZE+1) OP_NUM2BIN -> failure`. Trying to produce an array which is too large.
+4. other valid parameters with various results
+
+### OP_BIN2NUM
+
+*`OP_BIN2NUM` replaces `OP_RIGHT` and uses its opcode*
+
+ Opcode (decimal): 129
+ Opcode (hex): 0x81
+
+Convert the byte sequence into a numeric value, minimally encoding the result. The byte sequence must encode the value in little-endian encoding.
+
+ a OP_BIN2NUM -> x
+
+See also `OP_NUM2BIN`.
+
+Notes:
+* if `a` is any form of zero, including negative zero, then `OP_0` must be the result
+
+Examples:
+* `{0x02, 0x00, 0x00, 0x00, 0x00} OP_BIN2NUM -> 2`. `0x0200000000` in little-endian encoding has value 2.
+* `{0x05, 0x00, 0x80} OP_BIN2NUM -> -5`. `0x050080` in little-endian encoding has value -5.
+
+The operator must fail if:
+1. the numeric value is out of the range of acceptable numeric values (currently size is limited to 4 bytes)
+
+Impact of successful execution:
+* stack memory use is equal or less than before. Minimal encoding of the byte sequence can produce a result which is shorter.
+* the number of elements on the stack remains constant
+
+Unit tests:
+1. `a OP_BIN2NUM -> failure`, when `a` is a byte sequence whose numeric value is too large to fit into the numeric value
+ type, for both positive and negative values.
+2. `{0x00} OP_BIN2NUM -> OP_0`. Byte sequences, of various lengths, consisting only of zeros should produce an OP_0 (zero
+ length array).
+3. `{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} OP_BIN2NUM -> 1`. A large byte sequence, whose numeric value would fit in the numeric value
+ type, is a valid operand.
+4. The same test as above, where the length of the input byte sequence is equal to MAX_SCRIPT_ELEMENT_SIZE.
+5. `{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80} OP_BIN2NUM -> -1`. Same as above, for negative values.
+6. `{0x80} OP_BIN2NUM -> OP_0`. Negative zero, in a byte sequence, should produce zero.
+7. `{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80} OP_BIN2NUM -> OP_0`. Large negative zero, in a byte sequence, should produce zero.
+8. other valid parameters with various results
+
+## Reference implementation
+
+* OP_AND, OP_OR, OP_XOR: https://reviews.bitcoinabc.org/D1211
+
+* OP_DIV and OP_MOD: https://reviews.bitcoinabc.org/D1212
+
+* OP_CAT: https://reviews.bitcoinabc.org/D1227
+
+* OP_SPLIT: https://reviews.bitcoinabc.org/D1228
+
+* OP_BIN2NUM: https://reviews.bitcoinabc.org/D1220
+
+* OP_NUM2BIN: https://reviews.bitcoinabc.org/D1222
+
+
+## References
+
+<a name="op_codes">[1]</a> https://en.bitcoin.it/wiki/Script#Opcodes
diff --git a/doc/standards/nov-13-hardfork-spec.md b/doc/standards/nov-13-hardfork-spec.md
new file mode 100644
index 000000000..e5acf6939
--- /dev/null
+++ b/doc/standards/nov-13-hardfork-spec.md
@@ -0,0 +1,86 @@
+---
+layout: specification
+title: November 13th Bitcoin Cash Hardfork Technical Details
+category: spec
+date: 2017-11-07
+activation: 1510600000
+version: 1.3
+---
+
+## Summary
+
+When the median time past[1] of the most recent 11 blocks (MTP-11) is greater than or equal to UNIX timestamp 1510600000 Bitcoin Cash will execute a hardfork according to this specification. Starting from the next block these three consensus rules changes will take effect:
+
+* Enforcement of LOW_S signatures ([BIP 0146](https://github.com/bitcoin/bips/blob/master/bip-0146.mediawiki#low_s))
+* Enforcement of NULLFAIL ([BIP 0146](https://github.com/bitcoin/bips/blob/master/bip-0146.mediawiki#nullfail))
+* A replacement for the emergency difficulty adjustment. The algorithm for the new difficulty adjustment is described below
+
+## Difficulty Adjustment Algorithm Description
+
+To calculate the difficulty of a given block (B_n+1), with an MTP-11[1] greater than or equal to the unix timestamp 1510600000, perform the following steps:
+
+* NOTE: Implementations must use integer arithmetic only
+
+1. Let B_n be the Nth block in a Bitcoin Cash Blockchain.
+1. Let B_last be chosen[2] from [B_n-2, B_n-1, B_n].
+1. Let B_first be chosen[2] from [B_n-146, B_n-145, B_n-144].
+1. Let the Timespan (TS) be equal to the difference in UNIX timestamps (in seconds) between B_last and B_first within the range [72 * 600, 288 * 600]. Values outside should be treated as their respective limit
+1. Let the Work Performed (W) be equal to the difference in chainwork[3] between B_last and B_first.
+1. Let the Projected Work (PW) be equal to (W * 600) / TS.
+1. Let Target (T) be equal to (2^256 - PW) / PW. This is calculated by taking the two’s complement of PW (-PW) and dividing it by PW (-PW / PW).
+1. The target difficulty for block B_n+1 is then equal to the lesser of T and 0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF. A sketch of these steps follows below.
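+
+A sketch of these steps, assuming hypothetical `timestamp(b)` and
+`chainwork(b)` helpers and suitable-block selection per footnote 2 below:
+
+```python
+MAX_TARGET = 0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+
+def next_target(b_first, b_last, timestamp, chainwork):
+    ts = timestamp(b_last) - timestamp(b_first)
+    ts = min(max(ts, 72 * 600), 288 * 600)      # clamp the timespan
+    w = chainwork(b_last) - chainwork(b_first)  # work performed
+    pw = (w * 600) // ts                        # projected work
+    t = (2**256 - pw) // pw                     # target implied by the work
+    return min(t, MAX_TARGET)
+```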
+
+## Test Case
+
+1. Create a genesis block with the following data:
+
+```
+ nHeight = 0;
+ nTime = 1269211443;
+ nBits = 0x1c0fffff;
+```
+
+1. Add 2049 blocks at 600 second intervals with the same nBits.
+1. Add another 10 blocks at 600 second intervals. nBits should remain constant.
+1. Add a block 6000 seconds in the future with nBits remaining the same.
+1. Add a block -4800 seconds from the previous block. nBits should remain constant.
+1. Add 20 blocks at 600 second intervals. nBits should remain constant.
+1. Add a block at a 550 second interval. nBits should remain constant.
+1. Add 10 blocks at 550 second intervals. The target difficulty should slowly decrease.
+1. nBits should be 0x1c0fe7b1.
+1. Add 20 more blocks at 10 second intervals. The target difficulty should decrease quickly.
+1. nBits should be 0x1c0db19f.
+1. Add 1 block at an interval of 6000 seconds.
+1. nBits should be 0x1c0d9222.
+1. Produce 93 blocks at 6000 second intervals. The target difficulty should increase.
+1. nBits should be 0x1c2f13b9.
+1. Add one block at 6000 seconds.
+1. nBits should be 0x1c2ee9bf.
+1. Add 192 blocks at 6000 second intervals. The target difficulty should increase.
+1. nBits should be 0x1d00ffff.
+1. Add 5 blocks at 6000 second intervals. Target should stay constant at the maximum value.
+
+## References
+
+ - [Algorithm](https://github.com/Bitcoin-ABC/bitcoin-abc/commit/be51cf295c239ff6395a0aa67a3e13906aca9cb2)
+ - [Activation](https://github.com/Bitcoin-ABC/bitcoin-abc/commit/18dc8bb907091d69f4887560ab2e4cfbc19bae77)
+ - [Activation Time](https://github.com/Bitcoin-ABC/bitcoin-abc/commit/8eed7939c72781a812fdf3fb8c36d4e3a428d268)
+ - [Test Case](https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/test/pow_tests.cpp#L193)
+
+FAQ
+---
+Q: Does this imply that if the blocks are timestamped sequentially, the last block has no effect since it will look at the block before that one?
+
+A: Yes
+
+Footnotes
+---------
+1. The MTP-11 of a block is defined as the median timestamp of the last 11 blocks prior to, and including, a specific block
+2. A block is chosen via the following mechanism:
+ Given a list: S = [B_n-2, B_n-1, B_n]
+ a. If timestamp(S[0]) greater than timestamp(S[2]) then swap S[0] and S[2].
+ b. If timestamp(S[0]) greater than timestamp(S[1]) then swap S[0] and S[1].
+ c. If timestamp(S[1]) greater than timestamp(S[2]) then swap S[1] and S[2].
+ d. Return S[1].
+ See [GetSuitableBlock](https://github.com/Bitcoin-ABC/bitcoin-abc/commit/be51cf295c239ff6395a0aa67a3e13906aca9cb2#diff-ba91592f703a9d0badf94e67144bc0aaR208)
+3. Chainwork for a Block (B) is the sum of block proofs from the genesis block up to and including block `B`. `Block proof` is defined in [chain.cpp](https://github.com/Bitcoin-ABC/bitcoin-abc/blob/d8eac91f8d16716eed0ad11ccac420122280bb13/src/chain.cpp#L132)
diff --git a/doc/standards/op_checkdatasig.md b/doc/standards/op_checkdatasig.md
new file mode 100644
index 000000000..95cabf751
--- /dev/null
+++ b/doc/standards/op_checkdatasig.md
@@ -0,0 +1,199 @@
+---
+layout: specification
+title: OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY Specification
+category: spec
+date: 2018-08-20
+activation: 1542300000
+version: 0.6
+---
+
+OP_CHECKDATASIG
+===============
+
+OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY check whether a signature is valid with respect to a message and a public key.
+
+OP_CHECKDATASIG permits data to be imported into a script and its validity checked against some signing authority such as an "Oracle".
+
+OP_CHECKDATASIG and OP_CHECKDATASIGVERIFY are designed to be implemented similarly to OP_CHECKSIG [1]. Conceptually, one could imagine OP_CHECKSIG functionality being replaced by OP_CHECKDATASIG, along with a separate Op Code to create a hash from the transaction based on the SigHash algorithm.
+
+OP_CHECKDATASIG Specification
+-----------------------------
+
+### Semantics
+
+OP_CHECKDATASIG fails immediately if the stack is not well formed. To be well formed, the stack must contain at least three elements [`<sig>`, `<msg>`, `<pubKey>`] in this order where `<pubKey>` is the top element and
+ * `<pubKey>` must be a validly encoded public key
+ * `<msg>` can be any string
+ * `<sig>` must follow the strict DER encoding as described in [2] and the S-value of `<sig>` must be at most the curve order divided by 2 as described in [3]
+
+If the stack is well formed, then OP_CHECKDATASIG pops the top three elements [`<sig>`, `<msg>`, `<pubKey>`] from the stack and pushes true onto the stack if `<sig>` is valid with respect to the raw single-SHA256 hash of `<msg>` and `<pubKey>` using the secp256k1 elliptic curve. Otherwise, if `<sig>` is the empty string, it pops the three elements and pushes false onto the stack; in all other cases the script fails.
+
+Nullfail is enforced the same as for OP_CHECKSIG [3]. If the signature does not match the supplied public key and message hash, and the signature is not an empty byte array, the entire script fails.
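+
+A sketch of these stack semantics, assuming a hypothetical
+`ecdsa_verify(sig, msg_hash, pubkey) -> bool` primitive; the DER, Low-S and
+public-key encoding checks are elided:
+
+```python
+import hashlib
+
+def op_checkdatasig(stack, ecdsa_verify):
+    if len(stack) < 3:
+        raise ValueError("invalid stack operation")
+    pubkey, msg, sig = stack.pop(), stack.pop(), stack.pop()
+    if len(sig) == 0:
+        stack.append(b"")  # empty signature: pop three, push false
+        return
+    msg_hash = hashlib.sha256(msg).digest()  # raw single SHA256 of <msg>
+    if not ecdsa_verify(sig, msg_hash, pubkey):
+        raise ValueError("NULLFAIL: non-empty invalid signature")
+    stack.append(b"\x01")  # push true
+```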
+
+### Opcode Number
+
+OP_CHECKDATASIG uses the previously unused opcode number 186 (0xba in hex encoding)
+
+### SigOps
+
+Signature operations accounting for OP_CHECKDATASIG shall be calculated the same as OP_CHECKSIG. This means that each OP_CHECKDATASIG shall be counted as one (1) SigOp.
+
+### Activation
+
+Use of OP_CHECKDATASIG, unless occurring in an unexecuted OP_IF branch, will make the transaction invalid if it is included in a block where the median timestamp of the prior 11 blocks is less than 1542300000.
+
+### Unit Tests
+
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` fails if 15 November 2018 protocol upgrade is not yet activated.
+ - `<sig> <msg> OP_CHECKDATASIG` fails if there are fewer than 3 items on stack.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` fails if `<pubKey>` is not a validly encoded public key.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` fails if `<sig>` is not a validly encoded signature with strict DER encoding.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` fails if signature `<sig>` is not empty and does not pass the Low S check.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` fails if signature `<sig>` is not empty and does not pass signature validation of `<msg>` and `<pubKey>`.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` pops three elements and pushes false onto the stack if `<sig>` is an empty byte array.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIG` pops three elements and pushes true onto the stack if `<sig>` is a valid signature of `<msg>` with respect to `<pubKey>`.
+
+OP_CHECKDATASIGVERIFY Specification
+-----------------------------------
+
+### Semantics
+
+OP_CHECKDATASIGVERIFY is equivalent to OP_CHECKDATASIG followed by OP_VERIFY. It leaves nothing on the stack, and will cause the script to fail immediately if the signature check does not pass.
+
+### Opcode Number
+
+OP_CHECKDATASIGVERIFY uses the previously unused opcode number 187 (0xbb in hex encoding)
+
+### SigOps
+
+Signature operations accounting for OP_CHECKDATASIGVERIFY shall be calculated the same as OP_CHECKSIGVERIFY. This means that each OP_CHECKDATASIGVERIFY shall be counted as one (1) SigOp.
+
+### Activation
+
+Use of OP_CHECKDATASIGVERIFY, unless occurring in an unexecuted OP_IF branch, will make the transaction invalid if it is included in a block where the median timestamp of the prior 11 blocks is less than 1542300000.
+
+### Unit Tests
+
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` fails if 15 November 2018 protocol upgrade is not yet activated.
+ - `<sig> <msg> OP_CHECKDATASIGVERIFY` fails if there are fewer than 3 items on stack.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` fails if `<pubKey>` is not a validly encoded public key.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` fails if `<sig>` is not a validly encoded signature with strict DER encoding.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` fails if signature `<sig>` is not empty and does not pass the Low S check.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` fails if `<sig>` is not a valid signature of `<msg>` with respect to `<pubKey>`.
+ - `<sig> <msg> <pubKey> OP_CHECKDATASIGVERIFY` pops the top three stack elements if `<sig>` is a valid signature of `<msg>` with respect to `<pubKey>`.
+
+Sample Implementation [4, 5]
+----------------------------
+
+```c++
+ case OP_CHECKDATASIG:
+ case OP_CHECKDATASIGVERIFY: {
+ // Make sure this remains an error before activation.
+ if ((flags & SCRIPT_ENABLE_CHECKDATASIG) == 0) {
+ return set_error(serror, SCRIPT_ERR_BAD_OPCODE);
+ }
+
+ // (sig message pubkey -- bool)
+ if (stack.size() < 3) {
+ return set_error(
+ serror, SCRIPT_ERR_INVALID_STACK_OPERATION);
+ }
+
+ valtype &vchSig = stacktop(-3);
+ valtype &vchMessage = stacktop(-2);
+ valtype &vchPubKey = stacktop(-1);
+
+ if (!CheckDataSignatureEncoding(vchSig, flags,
+ serror) ||
+ !CheckPubKeyEncoding(vchPubKey, flags, serror)) {
+ // serror is set
+ return false;
+ }
+
+ bool fSuccess = false;
+ if (vchSig.size()) {
+ valtype vchHash(32);
+ CSHA256()
+ .Write(vchMessage.data(), vchMessage.size())
+ .Finalize(vchHash.data());
+ uint256 message(vchHash);
+ CPubKey pubkey(vchPubKey);
+ fSuccess = pubkey.Verify(message, vchSig);
+ }
+
+ if (!fSuccess && (flags & SCRIPT_VERIFY_NULLFAIL) &&
+ vchSig.size()) {
+ return set_error(serror, SCRIPT_ERR_SIG_NULLFAIL);
+ }
+
+ popstack(stack);
+ popstack(stack);
+ popstack(stack);
+ stack.push_back(fSuccess ? vchTrue : vchFalse);
+ if (opcode == OP_CHECKDATASIGVERIFY) {
+ if (fSuccess) {
+ popstack(stack);
+ } else {
+ return set_error(serror,
+ SCRIPT_ERR_CHECKDATASIGVERIFY);
+ }
+ }
+ } break;
+```
+
+Sample Usage
+------------
+
+The following example shows a spend and redeem script for a basic use of CHECKDATASIG. This example validates the signature of some data, provides a placeholder where you would then process that data, and finally allows one of two keys to spend based on the outcome of the data processing.
+
+### spend script:
+```
+push txsignature
+push txpubkey
+push msg
+push sig
+```
+### redeem script:
+```
+                      (txsig, txpubkey, msg, sig)
+OP_OVER (txsig, txpubkey, msg, sig, msg)
+push data pubkey (txsig, txpubkey, msg, sig, msg, pubkey)
+OP_CHECKDATASIGVERIFY (txsig, txpubkey, msg)
+```
+Now that msg is on the stack top, the script can evaluate predicates on it,
+resulting in the message being consumed and a true/false condition left on the stack: (txsig, txpubkey, boolean)
+```
+OP_IF (txsig, txpubkey)
+ OP_DUP (txsig, txpubkey, txpubkey)
+ OP_HASH160 (txsig, txpubkey, address)
+ push <p2pkh spend address> (txsig, txpubkey, address, p2pkh spend address)
+ OP_EQUALVERIFY (txsig, txpubkey)
+ OP_CHECKSIG
+OP_ELSE
+ (same as if clause but a different <p2pkh spend address>)
+OP_ENDIF
+```
+
+History
+-------
+
+This specification is based on Andrew Stone’s OP_DATASIGVERIFY proposal [6, 7]. It is modified from Stone's original proposal based on a synthesis of all the peer-review and feedback received [8].
+
+References
+----------
+
+[1] [OP_CHECKSIG](https://en.bitcoin.it/wiki/OP_CHECKSIG)
+
+[2] [Strict DER Encoding](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki)
+
+[3] [Low-S and Nullfail Specification](https://github.com/bitcoin/bips/blob/master/bip-0146.mediawiki)
+
+[4] [Bitcoin ABC implementation](https://reviews.bitcoinabc.org/D1621)
+
+[5] [Bitcoin ABC implementation update](https://reviews.bitcoinabc.org/D1646)
+
+[6] [Andrew Stone’s OP_DATASIGVERIFY](https://github.com/BitcoinUnlimited/BitcoinUnlimited/blob/bucash1.3.0.0/doc/opdatasigverify.md)
+
+[7] [Andrew Stone's article on Scripting](https://medium.com/@g.andrew.stone/bitcoin-scripting-applications-decision-based-spending-8e7b93d7bdb9)
+
+[8] [Peer Review of Andrew Stone's Proposal](https://github.com/bitcoincashorg/bitcoincash.org/pull/10)
diff --git a/doc/standards/replay-protected-sighash.md b/doc/standards/replay-protected-sighash.md
new file mode 100644
index 000000000..0763a14fb
--- /dev/null
+++ b/doc/standards/replay-protected-sighash.md
@@ -0,0 +1,210 @@
+---
+layout: specification
+title: BUIP-HF Digest for replay protected signature verification across hard forks
+category: spec
+date: 2017-07-16
+activation: 1501590000
+version: 1.2
+---
+
+## Abstract
+
+This document describes proposed requirements and design for a reusable signing mechanism ensuring replay protection in the event of a chain split. It provides a way for users to create transactions which are invalid on forks lacking support for the mechanism and a fork-specific ID.
+
+The proposed digest algorithm is adapted from BIP143[[1]](#bip143) as it minimizes redundant data hashing in verification, covers the input value by the signature and is already implemented in a wide variety of applications[[2]](#bip143Motivation).
+
+The proposed digest algorithm is used when the `SIGHASH_FORKID` bit is set in the signature's sighash type. The verification of signatures which do not set this bit is not affected.
+
+## Specification
+
+### Activation
+
+The proposed digest algorithm is only used when the `SIGHASH_FORKID` bit in the signature's sighash type is set. It is defined as follows:
+
+````cpp
+ // ...
+ SIGHASH_SINGLE = 3,
+ SIGHASH_FORKID = 0x40,
+ SIGHASH_ANYONECANPAY = 0x80,
+ // ...
+````
+
+In presence of the `SIGHASH_FORKID` flag in the signature's sighash type, the proposed algorithm is used.
+
+Signatures using the `SIGHASH_FORKID` digest method must be rejected before [UAHF](https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/uahf-technical-spec.md) is activated.
+
+In order to ensure proper activation, the reference implementation uses the `SCRIPT_ENABLE_SIGHASH_FORKID` flag when executing `EvalScript`.
+
+### Digest algorithm
+
+The proposed digest algorithm computes the double SHA256 of the serialization of:
+1. nVersion of the transaction (4-byte little endian)
+2. hashPrevouts (32-byte hash)
+3. hashSequence (32-byte hash)
+4. outpoint (32-byte hash + 4-byte little endian)
+5. scriptCode of the input (serialized as scripts inside CTxOuts)
+6. value of the output spent by this input (8-byte little endian)
+7. nSequence of the input (4-byte little endian)
+8. hashOutputs (32-byte hash)
+9. nLocktime of the transaction (4-byte little endian)
+10. sighash type of the signature (4-byte little endian)
+
+Items 1, 4, 7 and 9 have the same meaning as in the original algorithm[[3]](#OP_CHECKSIG).
+
+#### hashPrevouts
+
+* If the `ANYONECANPAY` flag is not set, `hashPrevouts` is the double SHA256 of the serialization of all input outpoints;
+* Otherwise, `hashPrevouts` is a `uint256` of `0x0000......0000`.
+
+#### hashSequence
+
+* If none of the `ANYONECANPAY`, `SINGLE`, `NONE` sighash type is set, `hashSequence` is the double SHA256 of the serialization of `nSequence` of all inputs;
+* Otherwise, `hashSequence` is a `uint256` of `0x0000......0000`.
+
+#### scriptCode
+
+In this section, we call `script` the script being currently executed. This means `redeemScript` in case of P2SH, or the `scriptPubKey` in the general case.
+
+* If the `script` does not contain any `OP_CODESEPARATOR`, the `scriptCode` is the `script` serialized as scripts inside `CTxOut`.
+* If the `script` contains any `OP_CODESEPARATOR`, the `scriptCode` is the `script` but removing everything up to and including the last executed `OP_CODESEPARATOR` before the signature checking opcode being executed, serialized as scripts inside `CTxOut`.
+
+Notes:
+1. Contrary to the original algorithm, this one does not use `FindAndDelete` to remove the signature from the script.
+2. Because of 1, it is not possible to create a valid signature within `redeemScript` or `scriptPubKey`, as the signature would be part of the digest. This enforces that the signature is in `sigScript`.
+3. In case an opcode that requires signature checking is present in `sigScript`, `script` is effectively `sigScript`. However, for reasons similar to 2, it is not possible to provide a valid signature in that case.
+
+#### value
+
+The 8-byte value of the amount of Bitcoin this input contains.
+
+#### hashOutputs
+
+* If the sighash type is neither `SINGLE` nor `NONE`, `hashOutputs` is the double SHA256 of the serialization of all output amounts (8-byte little endian) paired up with their `scriptPubKey` (serialized as scripts inside CTxOuts);
+* If sighash type is `SINGLE` and the input index is smaller than the number of outputs, `hashOutputs` is the double SHA256 of the output amount with `scriptPubKey` of the same index as the input;
+* Otherwise, `hashOutputs` is a `uint256` of `0x0000......0000`.
+
+Notes:
+1. In the original algorithm[[3]](#OP_CHECKSIG), a `uint256` of `0x0000......0001` is committed if the input index for a `SINGLE` signature is greater than or equal to the number of outputs. In this BIP a `0x0000......0000` is committed, without changing the semantics.
+
+#### sighash type
+
+The sighash type is altered to include a 24-bit *fork id* in its most significant bits.
+
+````cpp
+ ss << ((GetForkID() << 8) | nHashType);
+````
+
+This ensures that the proposed digest algorithm will generate different results on forks using different *fork ids*.
+
+## Implementation
+
+Addition to `SignatureHash`:
+
+````cpp
+ if (nHashType & SIGHASH_FORKID) {
+ uint256 hashPrevouts;
+ uint256 hashSequence;
+ uint256 hashOutputs;
+
+ if (!(nHashType & SIGHASH_ANYONECANPAY)) {
+ hashPrevouts = GetPrevoutHash(txTo);
+ }
+
+ if (!(nHashType & SIGHASH_ANYONECANPAY) &&
+ (nHashType & 0x1f) != SIGHASH_SINGLE &&
+ (nHashType & 0x1f) != SIGHASH_NONE) {
+ hashSequence = GetSequenceHash(txTo);
+ }
+
+ if ((nHashType & 0x1f) != SIGHASH_SINGLE &&
+ (nHashType & 0x1f) != SIGHASH_NONE) {
+ hashOutputs = GetOutputsHash(txTo);
+ } else if ((nHashType & 0x1f) == SIGHASH_SINGLE &&
+ nIn < txTo.vout.size()) {
+ CHashWriter ss(SER_GETHASH, 0);
+ ss << txTo.vout[nIn];
+ hashOutputs = ss.GetHash();
+ }
+
+ CHashWriter ss(SER_GETHASH, 0);
+ // Version
+ ss << txTo.nVersion;
+ // Input prevouts/nSequence (none/all, depending on flags)
+ ss << hashPrevouts;
+ ss << hashSequence;
+ // The input being signed (replacing the scriptSig with scriptCode +
+ // amount). The prevout may already be contained in hashPrevout, and the
+        // nSequence may already be contained in hashSequence.
+ ss << txTo.vin[nIn].prevout;
+ ss << static_cast<const CScriptBase &>(scriptCode);
+ ss << amount;
+ ss << txTo.vin[nIn].nSequence;
+ // Outputs (none/one/all, depending on flags)
+ ss << hashOutputs;
+ // Locktime
+ ss << txTo.nLockTime;
+ // Sighash type
+        ss << ((GetForkID() << 8) | nHashType);
+ return ss.GetHash();
+ }
+````
+
+Computation of midstates:
+
+````cpp
+uint256 GetPrevoutHash(const CTransaction &txTo) {
+ CHashWriter ss(SER_GETHASH, 0);
+ for (unsigned int n = 0; n < txTo.vin.size(); n++) {
+ ss << txTo.vin[n].prevout;
+ }
+
+ return ss.GetHash();
+}
+
+uint256 GetSequenceHash(const CTransaction &txTo) {
+ CHashWriter ss(SER_GETHASH, 0);
+ for (unsigned int n = 0; n < txTo.vin.size(); n++) {
+ ss << txTo.vin[n].nSequence;
+ }
+
+ return ss.GetHash();
+}
+
+uint256 GetOutputsHash(const CTransaction &txTo) {
+ CHashWriter ss(SER_GETHASH, 0);
+ for (unsigned int n = 0; n < txTo.vout.size(); n++) {
+ ss << txTo.vout[n];
+ }
+
+ return ss.GetHash();
+}
+````
+
+Gating code:
+
+````cpp
+ uint32_t nHashType = GetHashType(vchSig);
+ if (nHashType & SIGHASH_FORKID) {
+ if (!(flags & SCRIPT_ENABLE_SIGHASH_FORKID))
+ return set_error(serror, SCRIPT_ERR_ILLEGAL_FORKID);
+ } else {
+ // Drop the signature in scripts when SIGHASH_FORKID is not used.
+ scriptCode.FindAndDelete(CScript(vchSig));
+ }
+````
+
+## Note
+
+In the UAHF, a `fork id` of 0 is used (see [[4]](#uahfspec) REQ-6-2 NOTE 4), i.e.
+the GetForkID() function returns zero.
+In that case the code can be simplified to omit the function.
+
+## References
+
+<a name="bip143">[1]</a> https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
+
+<a name="bip143Motivation">[2]</a> https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki#Motivation
+
+<a name="OP_CHECKSIG">[3]</a> https://en.bitcoin.it/wiki/OP_CHECKSIG
+
+<a name="uahfspec">[4]</a> https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/uahf-technical-spec.md
diff --git a/doc/standards/transaction.md b/doc/standards/transaction.md
new file mode 100644
index 000000000..4a6fb4e61
--- /dev/null
+++ b/doc/standards/transaction.md
@@ -0,0 +1,235 @@
+---
+layout: specification
+title: Transaction Spec for Bitcoin Cash ⚠️ out of date ⚠️
+category: spec
+date: 2017-08-26
+activation: 1515888000
+version: 1.0
+---
+
+This section of the Bitcoin Cash (BCH) specification ("spec") documents the **transaction data structure** for implementing a compatible BCH client, including transaction format, opcodes, and examples.
+
+This spec is based on the Bitcoin ABC implementation of the [Bitcoin Cash](https://www.bitcoincash.org/) protocol.
+
+Developer resources:
+- [Bitcoin ABC source code](https://github.com/Bitcoin-ABC/bitcoin-abc)
+- [Bitcoin ABC developer documentation](https://www.bitcoinabc.org/doc/dev/)
+
+## Transaction
+A transaction is one of the two base primitives in the BCH system, the other being a block. Primitive in this context means that it is one of the data types for which the BCH spec provides built-in support.
+
+A transaction is a transfer of BCH that is broadcast to the network and collected into blocks. A transaction typically references previous transaction outputs as new transaction inputs and dedicates all input Bitcoin Cash values to new outputs. Transactions are not encrypted, so it is possible to browse and view every transaction ever collected into a block. Once transactions are buried under enough confirmations they can be considered irreversible.
+
+A transaction comprises a signature and redeem script pair, which provides flexibility in releasing outputs. A serialized transaction contains inputs and outputs.
+
+Source code: https://github.com/Bitcoin-ABC/bitcoin-abc/blob/master/src/primitives/transaction.h.
+
+### Transaction requirements
+A transaction that meets the criteria documented here is said to be standard. Standard transactions are accepted into the mempool and relayed by nodes on the network. This ensures that nodes have a similar-looking mempool so that the system behaves predictably. Standard transaction outputs nominate addresses, and the redemption of any future inputs requires a relevant signature.
+
+Transaction requirements:
+- Transaction size: < 100k
+- Version must be 1 or 2
+- Signature script must be data push only
+- Script size must be 1650 or less
+
+NOTE: A BCH node should be able to process non-standard transactions as well. Even if a node cannot successfully relay a non-standard transaction, it should not crash if it ends up having to process one of those transactions.
+
+### Transaction Input
+Inputs to a transaction include the outpoint, signature script, and sequence.
+
+An input is a reference to an output from a previous transaction. Multiple inputs are often listed in a transaction. All of the new transaction's input values (that is, the total coin value of the previous outputs referenced by the new transaction's inputs) are added up, and the total (less any transaction fee) is completely used by the outputs of the new transaction. Previous tx is a hash of a previous transaction. Index is the specific output in the referenced transaction. scriptSig is the first half of a script (discussed in more detail later).
+
+### Transaction Output
+Outputs from a transaction include the BCH amount and redeem script which is used to spend the output and sets up parameters for the signature script. Redeem scripts should not use OP_CODES.
+
+## OpCodes
+The opcodes used in the pubkey scripts of standard transactions are as follows.
+
+See also the source code: https://github.com/Bitcoin-ABC/bitcoin-abc/blob/master/src/script/script.h.
+
+### 0x00 to 0x4e
+There are various data-pushing opcodes from 0x00 to 0x4e (1--78) that must be used to push signatures and public keys onto the stack.
+
+### OP_TRUE/OP_1, OP_2 through OP_16
+`OP_TRUE`/`OP_1` (0x51) and `OP_2` through `OP_16` (0x52--0x60) push the values 1 through 16 to the stack.
+
+### OP_CHECKSIG
+`OP_CHECKSIG` consumes a signature and a full public key, and pushes true onto the stack if the transaction data specified by the SIGHASH flag was converted into the signature using the same ECDSA private key that generated the public key. Otherwise, it pushes false onto the stack.
+
+### OP_DUP
+`OP_DUP` pushes a copy of the topmost stack item onto the stack.
+
+### OP_HASH160
+`OP_HASH160` consumes the topmost item on the stack, computes the RIPEMD160(SHA256()) hash of that item, and pushes that hash onto the stack.
+
+### OP_EQUAL
+`OP_EQUAL` consumes the top two items on the stack, compares them, and pushes true onto the stack if they are the same, false if not.
+
+### OP_VERIFY
+`OP_VERIFY` consumes the topmost item on the stack. If that item is zero (false) it terminates the script in failure.
+
+### OP_EQUALVERIFY
+`OP_EQUALVERIFY` runs `OP_EQUAL` and then `OP_VERIFY` in sequence.
+
+### OP_CHECKMULTISIG
+`OP_CHECKMULTISIG` consumes the value (n) at the top of the stack, consumes that many of the next stack levels (public keys), consumes the value (m) now at the top of the stack, and consumes that many of the next values (signatures) plus one extra value.
+
+The "one extra value" it consumes is the result of an off-by-one error in the original Bitcoin implementation. This value is not used in Bitcoincash, so signature scripts prefix the list of secp256k1 signatures with a single OP_0 (0x00).
+
+`OP_CHECKMULTISIG` compares the first signature against each public key until it finds an ECDSA match. Starting with the subsequent public key, it compares the second signature against each remaining public key until it finds an ECDSA match. The process is repeated until all signatures have been checked or not enough public keys remain to produce a successful result.
+
+Because public keys are not checked again if they fail any signature comparison, signatures must be placed in the signature script using the same order as their corresponding public keys were placed in the pubkey script or redeem script.
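+
+For illustration, the matching order described above can be sketched as follows. This is a hypothetical, simplified sketch: `PubKey`, `Signature`, and `VerifySig` stand in for the node's actual key, signature, and ECDSA verification machinery.
+
+```c++
+#include <cstddef>
+#include <vector>
+
+// Hypothetical placeholder types and verification hook.
+struct PubKey {};
+struct Signature {};
+bool VerifySig(const PubKey &key, const Signature &sig); // assumed elsewhere
+
+// Signatures must appear in the same relative order as their keys:
+// a key that fails a comparison is never revisited.
+bool OrderedMultisigMatch(const std::vector<Signature> &sigs,
+                          const std::vector<PubKey> &keys) {
+    std::size_t ikey = 0;
+    for (const Signature &sig : sigs) {
+        while (ikey < keys.size() && !VerifySig(keys[ikey], sig)) {
+            ikey++; // skipped keys stay skipped for later signatures
+        }
+        if (ikey == keys.size()) {
+            return false; // ran out of keys before matching every signature
+        }
+        ikey++; // consume the matching key
+    }
+    return true;
+}
+```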
+
+### OP_RETURN
+`OP_RETURN` terminates the script in failure when executed.
+
+## Address Conversion
+The hashes used in P2PKH and P2SH outputs are commonly encoded as Bitcoin Cash addresses. This is the procedure to encode those hashes and decode the addresses.
+
+First, get your hash. For P2PKH, you RIPEMD-160(SHA256()) hash an ECDSA public key derived from your 256-bit ECDSA private key (random data). For P2SH, you RIPEMD-160(SHA256()) hash a redeem script serialized in the format used in raw transactions.
+
+Taking the resulting hash:
+
+1. Add an address version byte in front of the hash. The version bytes commonly used by Bitcoin Cash are:
+
+    * 0x00 for P2PKH addresses on the main Bitcoin Cash network (mainnet)
+
+    * 0x6f for P2PKH addresses on the Bitcoin Cash testing network (testnet)
+
+    * 0x05 for P2SH addresses on mainnet
+
+    * 0xc4 for P2SH addresses on testnet
+
+2. Create a copy of the version and hash; then hash that twice with SHA256: `SHA256(SHA256(version . hash))`
+
+3. Extract the first four bytes from the double-hashed copy. These are used as a checksum to ensure the base hash gets transmitted
+ correctly.
+
+4. Append the checksum to the version and hash, and encode it as a base58 string: `BASE58(version . hash . checksum)`
+
+The code can be traced using the [base58 header file][core base58.h].
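+
+For illustration, steps 1--4 can be sketched as follows. This is a hypothetical sketch: `Sha256` and `Base58Encode` stand in for the hashing and base58 routines in the node codebase.
+
+```c++
+#include <cstdint>
+#include <string>
+#include <vector>
+
+// Assumed helpers, stubbed here.
+std::vector<uint8_t> Sha256(const std::vector<uint8_t> &data);
+std::string Base58Encode(const std::vector<uint8_t> &data);
+
+// Encode a mainnet P2PKH address from a 20-byte RIPEMD-160(SHA256()) hash.
+std::string EncodeLegacyAddress(const std::vector<uint8_t> &hash160) {
+    std::vector<uint8_t> payload;
+    payload.push_back(0x00); // step 1: version byte (mainnet P2PKH)
+    payload.insert(payload.end(), hash160.begin(), hash160.end());
+
+    // Step 2: double-SHA256 of version . hash.
+    std::vector<uint8_t> digest = Sha256(Sha256(payload));
+
+    // Steps 3-4: append the first four bytes as checksum, then base58.
+    payload.insert(payload.end(), digest.begin(), digest.begin() + 4);
+    return Base58Encode(payload); // BASE58(version . hash . checksum)
+}
+```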
+
+To convert addresses back into hashes, reverse the base58 encoding, extract the checksum, repeat the steps to create the checksum and compare it against the extracted checksum, and then remove the version byte.
+
+## Raw Transaction Format
+Bitcoin Cash transactions are broadcast between peers in a serialized byte format, called raw format. It is this form of a transaction which is SHA256(SHA256()) hashed to create the TXID and, ultimately, the merkle root of a block containing the transaction---making the transaction format part of the consensus rules.
+
+Bitcoin Cash node software and many other tools print and accept raw transactions encoded as hex.
+
+A raw transaction has the following top-level format:
+
+| Bytes | Name | Data Type | Description
+|----------|--------------|---------------------|-------------
+| 4 | version | `uint32_t` | Transaction version number; currently version 1. Programs creating transactions using newer consensus rules may use higher version numbers.
+| *Varies* | tx_in count | `compactSize uint` | Number of inputs in this transaction.
+| *Varies* | tx_in | `txIn` | Transaction inputs. See description of txIn below.
+| *Varies* | tx_out count | `compactSize uint` | Number of outputs in this transaction.
+| *Varies* | tx_out | `txOut` | Transaction outputs. See description of txOut below.
+| 4 | lock_time | `uint32_t` | A time (Unix epoch time) or block number.
+
+A transaction may have multiple inputs and outputs, so the txIn and txOut structures may recur within a transaction. CompactSize unsigned
+integers are a form of variable-length integer; they are described in the CompactSize Unsigned Integers section below.
+
+## TxIn: Transaction Input
+Each non-coinbase input spends an outpoint from a previous transaction.
+
+| Bytes | Name | Data Type | Description
+|----------|------------------|----------------------|--------------
+| 36 | previous_output | outpoint | The previous outpoint being spent. See description of outpoint below.
+| *Varies* | script bytes | compactSize uint | The number of bytes in the signature script. Maximum is 10,000 bytes.
+| *Varies* | signature script | char[] | Script that satisfies conditions in the outpoint's pubkey script. Should only contain data pushes.
+| 4 | sequence | uint32_t | Sequence number. Default is 0xffffffff.
+
+## Outpoint
+The outpoint is a reference to an output from a previous transaction. Because a single transaction can include multiple outputs, the outpoint structure includes both a TXID and an output index number to refer to the specific part of a specific output.
+
+| Bytes | Name | Data Type | Description
+|-------|-------|-----------|--------------
+| 32 | hash | char[32] | The TXID of the transaction holding the output to spend. The TXID is a hash provided here in internal byte order.
+| 4 | index | uint32_t | The output index number of the specific output to spend from the transaction. The first output is 0x00000000.
+
+## TxOut: Transaction Output
+Each output spends a certain number of Satoshis, placing them under control of anyone who can satisfy the provided pubkey script.
+
+| Bytes | Name | Data Type | Description
+|----------|-----------------|------------------|--------------
+| 8 | value | int64_t | Number of Satoshis to spend. May be zero; the sum of all outputs may not exceed the sum of Satoshis previously spent to the outpoints provided in the input section. (Exception: coinbase transactions spend the block subsidy and collected transaction fees.)
+| 1+ | pk_script bytes | compactSize uint | Number of bytes in the pubkey script. Maximum is 10,000 bytes.
+| *Varies* | pk_script | char[] | Defines the conditions which must be satisfied to spend this output.
+
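+Putting the preceding tables together, the wire layout can be summarized with the following hypothetical sketch; the struct names are illustrative, not the node's actual classes, and each vector is preceded on the wire by a compactSize count (next section).
+
+```c++
+#include <cstdint>
+#include <vector>
+
+struct OutPointWire {
+    uint8_t txid[32]; // TXID of the funding transaction, internal byte order
+    uint32_t index;   // output index, little endian
+};
+
+struct TxInWire {
+    OutPointWire previousOutput;          // 36 bytes
+    std::vector<uint8_t> signatureScript; // length sent as compactSize
+    uint32_t sequence;                    // default 0xffffffff
+};
+
+struct TxOutWire {
+    int64_t value;                 // satoshis, 8-byte little endian
+    std::vector<uint8_t> pkScript; // length sent as compactSize
+};
+
+struct TxWire {
+    uint32_t version;            // 4-byte little endian
+    std::vector<TxInWire> vin;   // preceded by tx_in count
+    std::vector<TxOutWire> vout; // preceded by tx_out count
+    uint32_t lockTime;           // 4-byte little endian
+};
+```
+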
+## CompactSize Unsigned Integers
+The raw transaction format and several peer-to-peer network messages use a type of variable-length integer to indicate the number of bytes in a following piece of data.
+
+The source code and this document refer to these variable-length integers as compactSize. Because it's used in the transaction format, the format of compactSize unsigned integers is part of the consensus rules.
+
+For numbers from 0 to 252, compactSize unsigned integers look like regular unsigned integers. For other numbers up to 0xffffffffffffffff, a byte is prefixed to the number to indicate its length---but otherwise the numbers look like regular unsigned integers in little-endian order. For example, the number 515 is encoded as 0xfd0302.
+
+| Value | Bytes Used | Format
+|-----------------------------------------|------------|-----------------------------------------
+| >= 0 && <= 252 | 1 | uint8_t
+| >= 253 && <= 0xffff | 3 | 0xfd followed by the number as uint16_t
+| >= 0x10000 && <= 0xffffffff | 5 | 0xfe followed by the number as uint32_t
+| >= 0x100000000 && <= 0xffffffffffffffff | 9 | 0xff followed by the number as uint64_t
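+
+For illustration, a minimal encoder for the table above (a hypothetical helper, not the node's serializer):
+
+```c++
+#include <cstdint>
+#include <vector>
+
+// Append the compactSize encoding of n to out, little endian.
+void WriteCompactSize(std::vector<uint8_t> &out, uint64_t n) {
+    auto putLE = [&out](uint64_t v, int bytes) {
+        for (int i = 0; i < bytes; i++) {
+            out.push_back(static_cast<uint8_t>((v >> (8 * i)) & 0xff));
+        }
+    };
+    if (n <= 252) {
+        out.push_back(static_cast<uint8_t>(n));
+    } else if (n <= 0xffff) {
+        out.push_back(0xfd);
+        putLE(n, 2);
+    } else if (n <= 0xffffffff) {
+        out.push_back(0xfe);
+        putLE(n, 4);
+    } else {
+        out.push_back(0xff);
+        putLE(n, 8);
+    }
+}
+// Example from the text: n = 515 yields the bytes fd 03 02, i.e. 0xfd0302.
+```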
+
+## Signature Script
+The purpose of the signature script (scriptSig) is to ensure that the spender is a legitimate spender, that is, to provide evidence that the spender holds the relevant private key.
+
+The scriptSig contains two components: a signature and a public key. The public key must match the hash given in the script of the redeemed output. The public key is used to verify the redeemer's signature, which is the second component. More precisely, the second component is an ECDSA signature over a hash of a simplified version of the transaction. It, combined with the public key, proves the transaction was created by the real owner of the address in question.
+
+Signature scripts are not signed, so anyone can modify them. This means signature scripts should only contain data and data-pushing opcodes which can't be modified without causing the pubkey script to fail. Placing non-data-pushing opcodes in the signature script currently makes a transaction non-standard, and future consensus rules may forbid such transactions altogether. (Non-data-pushing opcodes are already forbidden in signature scripts when spending a P2SH pubkey script.)
+
+## Sequence
+
+Check lock time verify (s4)
+
+Check sequence verify (s4)
+
+## Standard Transaction Format Examples
+
+### P2SH
+
+ 23-bytes
+ OP_HASH160
+      <redeem script hash>
+      OP_EQUAL
+      Use address version=1 and hash=<redeem script hash>
+
+### P2PKH
+
+ 25 bytes
+ OP_DUP
+ OP_HASH160
+ <public key hash>
+ OP_EQUALVERIFY
+ OP_CHECKSIG
+ Use address version=0 and hash=<public key hash>
+
+### P2PK
+
+ 35 or 67 bytes
+ <public key>
+ OP_CHECKSIG
+ Use address version=0 and hash=HASH160(<public key>)
+
+### Bare multisig
+
+ <n: [0-20]>
+ <pubkey 0>
+ …
+ <pubkey n>
+ <(null)>
+ OP_CHECKMULTISIG
+
+NOTE: Bare multisig (which isn't wrapped into P2SH) is limited to at most 3 public keys (i.e., up to 3-of-3).
+
+### Data carrier
+
+ Limited to one per transaction
+ Limited to 223 bytes
+ OP_RETURN
+ <push data>
+
+NOTE: Multiple pushes of data are allowed.
diff --git a/doc/standards/uahf-technical-spec.md b/doc/standards/uahf-technical-spec.md
new file mode 100644
index 000000000..f3a408ad7
--- /dev/null
+++ b/doc/standards/uahf-technical-spec.md
@@ -0,0 +1,284 @@
+---
+layout: specification
+title: UAHF Technical Specification
+category: spec
+date: 2017-07-24
+activation: 1501590000
+version: 1.6
+---
+
+## Introduction
+
+This document describes proposed requirements for a block size Hard Fork (HF).
+
+BUIP 55 specified a block height fork. This UAHF specification is
+inspired by the idea of a flag day, but changed to a time-based fork due
+to miner requests. It should be possible to change easily to a height-based
+fork - the sense of the requirements would largely stay the same.
+
+
+## Definitions
+
+MTP: the "median time past" value of a block, calculated from its nTime
+value, and the nTime values of its up to 10 immediate ancestors.
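+
+For illustration, MTP can be sketched as follows (non-normative; `BlockIndex`
+is a hypothetical stand-in for the node's chain index):
+
+```c++
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+struct BlockIndex {
+    int64_t nTime;           // block timestamp
+    const BlockIndex *pprev; // previous block
+};
+
+// Median of the nTime values of a block and its up to 10 ancestors.
+int64_t GetMedianTimePast(const BlockIndex *pindex) {
+    std::vector<int64_t> times;
+    for (int i = 0; i < 11 && pindex != nullptr; i++, pindex = pindex->pprev) {
+        times.push_back(pindex->nTime);
+    }
+    std::sort(times.begin(), times.end());
+    return times[times.size() / 2];
+}
+```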
+
+"activation time": once the MTP of the chain tip is equal to or greater
+than this time, the next block must be a valid fork block. The fork block
+and subsequent blocks built on it must satisfy the new consensus rules.
+
+"fork block": the first block built on top of a chain tip whose MTP is
+greater than or equal to the activation time.
+
+"fork EB": the user-specified value that EB shall be set to at
+activation time. EB can be adjusted post-activation by the user.
+
+"fork MG": the user-specified value that MG shall be set to at activation
+time. It must be > 1MB. The user can adjust MG to any value once the
+fork has occurred (not limited to > 1MB after the fork).
+
+"Large block" means a block satisfying 1,000,000 bytes < block
+size <= EB, where EB is as adjusted by REQ-4-1 and a regular block
+is a block up to 1,000,000 bytes in size.
+
+"Core rules" means all blocks <= 1,000,000 bytes (Base block size).
+
+"Extended BU tx/sigops rules" means the existing additional consensus rules (1) and
+(2) below, as formalized by BUIP040 [1] and used by the Bitcoin Unlimited
+client's excessive checks for blocks larger than 1MB, extended with rule
+(3) below:
+1. maximum sigops per block is calculated based on the actual size of
+a block using
+max_block_sigops = 20000 * ceil((max(blocksize, 1000000) / 1000000))
+2. maximum allowed size of a single transaction is 1,000,000 bytes (1MB)
+3. maximum allowed number of sigops for a single transaction is 20k.
+
+NOTE 1: In plain English, the maximum allowed sigops per block is
+20K per MB of block size, rounded up to the nearest integer MB,
+i.e. 20K for blocks <= 1MB, 40K for blocks > 1MB and up to 2MB, etc.
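+
+For illustration, the sigops rule above in integer arithmetic (a hypothetical
+helper, not the normative implementation):
+
+```c++
+#include <algorithm>
+#include <cstdint>
+
+// max_block_sigops = 20000 * ceil(max(blocksize, 1000000) / 1000000)
+uint64_t MaxBlockSigops(uint64_t blocksizeBytes) {
+    uint64_t sizeBytes = std::max<uint64_t>(blocksizeBytes, 1000000);
+    uint64_t sizeMB = (sizeBytes + 999999) / 1000000; // round up to whole MB
+    return 20000 * sizeMB;
+}
+// MaxBlockSigops(1000000) == 20000; MaxBlockSigops(1000001) == 40000.
+```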
+
+
+## Requirements
+
+### REQ-1 (fork by default)
+
+The client (with UAHF implementation) shall default to activating
+a hard fork with new consensus rules as specified by the remaining
+requirements.
+
+RATIONALE: It is better to make the HF active by default in a
+special HF release version. Users have to download a version capable
+of HF anyway, it is more convenient for them if the default does not
+require them to make additional configuration.
+
+NOTE 1: It will be possible to disable the fork behavior (see
+REQ-DISABLE)
+
+
+### REQ-2 (configurable activation time)
+
+The client shall allow an "activation time" to be configured by the user,
+with a default value of 1501590000 (epoch time corresponding to Tue
+1 Aug 2017 12:20:00 UTC)
+
+RATIONALE: Make it configurable to adapt easily to UASF activation
+time changes.
+
+NOTE 1: Configuring an "activation time" value of zero (0) shall disable
+any UAHF hard fork special rules (see REQ-DISABLE)
+
+
+### REQ-3 (fork block must be > 1MB)
+
+The client shall enforce a block size larger than 1,000,000 bytes
+for the fork block.
+
+RATIONALE: This enforces the hard fork from the original 1MB
+chain and prevents a re-organization of the forked chain to
+the original chain.
+
+
+### REQ-4-1 (require "fork EB" configured to at least 8MB at startup)
+
+If UAHF is not disabled (see REQ-DISABLE), the client shall enforce
+that the "fork EB" is configured to at least 8,000,000 (bytes) by raising
+an error during startup requesting the user to ensure adequate configuration.
+
+RATIONALE: Users need to be able to run with their usual EB prior to the
+fork (e.g. some are running EB1 currently). The fork code needs to adjust
+this EB automatically to a > 1MB value. 8MB is chosen as a minimum since
+miners have indicated in the past that they would be willing to support
+such a size, and the current network is capable of handling it.
+
+
+### REQ-4-2 (require user to specify suitable *new* MG at startup)
+
+If UAHF is not disabled (see REQ-DISABLE), the client shall require
+the user to specify a "fork MG" (mining generation size) greater than
+1,000,000 bytes.
+
+RATIONALE: This ensures a suitable MG is set at the activation time so
+that a mining node would produce a fork block compatible with REQ-3.
+It also forces the user (miner) to decide on what size blocks they want to
+produce immediately after the fork.
+
+NOTE 1: The DEFAULT_MAX_GENERATED_BLOCK_SIZE in the released client needs
+to remain 1,000,000 bytes so that the client will not generate invalid
+blocks before the fork activates. At activation time, however, the "fork MG"
+specified by the user (default: 2MB) will take effect.
+
+
+### REQ-5 (max tx / max block sigops rules for blocks > 1 MB)
+
+Blocks larger than 1,000,000 bytes shall be subject to "Extended BU tx/sigops rules"
+as follows:
+
+1. maximum sigops per block shall be calculated based on the actual size of
+a block using
+`max_block_sigops = 20000 * ceil((max(blocksize_bytes, 1000000) / 1000000))`
+
+2. maximum allowed size of a single transaction shall be 1,000,000 bytes
+
+NOTE 1: Blocks up to and including 1,000,000 bytes in size shall be subject
+to existing pre-fork Bitcoin consensus rules.
+
+NOTE 2: Transactions exceeding 100,000 bytes (100KB) shall remain
+non-standard after the activation time, meaning they will not be relayed.
+
+NOTE 3: BU treats both rules (1) and (2) as falling under the Emergent
+Consensus rules (AD). Other clients may choose to implement them as
+firm rules at their own risk.
+
+
+### REQ-6-1 (disallow special OP_RETURN-marked transactions with sunset clause)
+
+Once the fork has activated, transactions consisting exclusively of a single OP_RETURN output, followed by a single minimally-coded data push with the specific magic data value of
+
+ Bitcoin: A Peer-to-Peer Electronic Cash System
+
+(46 characters, including the single spaces separating the words, and
+without any terminating null character) shall be considered invalid until
+block 530,000 inclusive.
+
+RATIONALE: (DEPRECATED - see NOTE 2) To give users on the legacy chain (or other fork chains)
+an opt-in way to exclude their transactions from processing on the UAHF
+fork chain. The sunset clause block height is calculated as approximately
+1 year after currently planned UASF activation time (Aug 1 2017 00:00:00 GMT),
+rounded down to a human friendly number.
+
+NOTE 1: Transactions with such OP_RETURNs shall be considered valid again
+for block 530,001 and onwards.
+
+NOTE 2: With the changes in v1.6 of this specification, mandatory use
+of SIGHASH_FORKID replay protection on UAHF chain makes the use of this
+opt-out protection unnecessary. Clients should nevertheless implement this
+requirement, as removing it would constitute a hard fork vis-a-vis the
+existing network. The sunset clause in this requirement will take care
+of its expiry by itself.
+
+
+### REQ-6-2 (mandatory signature shift via hash type)
+
+Once the fork has activated, a transaction shall be deemed valid only if
+the following are true in combination:
+- its nHashType has bit 6 set (SIGHASH_FORKID, mask 0x40)
+- a magic 'fork id' value is added to the nHashType before the hash is
+ calculated (see note 4)
+- it is digested using the new algorithm described in REQ-6-3
+
+RATIONALE: To provide strong protection against replay of existing
+transactions on the UAHF chain, only transactions signed with the new
+hash algorithm and having SIGHASH_FORKID set will be accepted, by consensus.
+
+NOTE 1: It is possible for other hard forks to allow SIGHASH_FORKID-protected
+transactions on their chain by implementing a compatible signature.
+However, this does require a counter hard fork by legacy chains.
+
+NOTE 2: (DEPRECATED) ~~The client shall still accept transactions whose signatures~~
+~~verify according to pre-fork rules, subject to the additional OP_RETURN~~
+~~constraint introduced by REQ-6-1.~~
+
+NOTE 3: (DEPRECATED) ~~If bit 6 is not set, only the unmodified nHashType will be used~~
+~~to compute the hash and verify the signature.~~
+
+NOTE 4: The magic 'fork id' value used by UAHF-compatible clients is zero.
+This means that the change in hash when bit 6 is set is effected only by
+the adapted signing algorithm (see REQ-6-3).
+
+NOTE 5: See also REQ-6-4 which introduces a requirement for use of
+SCRIPT_VERIFY_STRICTENC.
+
+
+### REQ-6-3 (use adapted BIP143 hash algorithm for protected transactions)
+
+Once the fork has activated, any transaction that has bit 6 set in its
+hash type shall have its signature hash computed using a minimally revised
+form of the transaction digest algorithm specified in BIP143.
+
+RATIONALE: see Motivation section of BIP143 [2].
+
+NOTE 1: refer to [3] for the specification of the revised transaction
+digest based on BIP143. Revisions were made to account for non-Segwit
+deployment.
+
+
+### REQ-6-4 (mandatory use of SCRIPT_VERIFY_STRICTENC)
+
+Once the fork has activated, transactions shall be validated with
+SCRIPT_VERIFY_STRICTENC flag set.
+
+RATIONALE: Use of SCRIPT_VERIFY_STRICTENC also ensures that the
+nHashType is validated properly.
+
+NOTE: As SCRIPT_VERIFY_STRICTENC is not clearly defined by any BIP,
+implementations seeking to be compliant should consult the Bitcoin C++
+source code to emulate the checks enforced by this flag.
+
+
+### REQ-7 Difficulty adjustment in case of hashrate drop
+
+In case the MTP of the tip of the chain is 12h or more after the MTP of the
+block 6 blocks before the tip, the proof-of-work target is increased by a
+quarter, or 25%, which corresponds to a difficulty reduction of 20%.
+
+RATIONALE: The hashrate supporting the chain depends on market price and is
+hard to predict. In order to make sure the chain remains viable no matter
+what, the difficulty needs to adjust down in case of an abrupt hashrate drop.
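+
+For illustration, the adjustment can be sketched as follows (hypothetical;
+the node uses a 256-bit target type and caps the result at the proof-of-work
+limit, both omitted here):
+
+```c++
+#include <cstdint>
+
+// If MTP(tip) - MTP(tip - 6 blocks) >= 12h, relax the target by 25%,
+// i.e. reduce difficulty by 20%.
+uint64_t RelaxTargetOnHashrateDrop(uint64_t target, int64_t mtpTip,
+                                   int64_t mtpSixBlocksBack) {
+    if (mtpTip - mtpSixBlocksBack >= 12 * 3600) {
+        target += target >> 2; // target * 1.25
+    }
+    return target;
+}
+```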
+
+### REQ-DISABLE (disable fork by setting fork time to 0)
+
+If the activation time is configured to 0, the client shall not enforce
+the new consensus rules of UAHF, including the activation of the fork,
+the size constraint at a certain time, and the enforcing of EB/AD
+constraints at startup.
+
+RATIONALE: To make it possible to use such a release as a compatible
+client with the legacy chain, i.e. to decide not to follow the HF on one's
+node, or to make that decision at a late stage without needing to change clients.
+
+
+### OPT-SERVICEBIT (NODE_BITCOIN_CASH service bit)
+
+A UAHF-compatible client should set service bit 5 (value 0x20).
+
+RATIONALE: This service bit allows signaling that the node is a UAHF
+supporting node, which helps DNS seeders distinguish UAHF implementations.
+
+NOTE 1: This is an optional feature which clients do not strictly have to
+implement.
+
+NOTE 2: This bit is currently referred to as NODE_BITCOIN_CASH and displayed
+as "CASH" in user interfaces of some Bitcoin clients (BU, ABC).
+
+
+## References
+
+[1] https://bitco.in/forum/threads/buip040-passed-emergent-consensus-parameters-and-defaults-for-large-1mb-blocks.1643/
+
+[2] https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki#Motivation
+
+[3] [Digest for replay protected signature verification across hard forks](replay-protected-sighash.md)
+
+[4] https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/uahf-test-plan.md
+
+
+END
diff --git a/doc/standards/uahf-test-plan.md b/doc/standards/uahf-test-plan.md
new file mode 100644
index 000000000..e82e4e9c4
--- /dev/null
+++ b/doc/standards/uahf-test-plan.md
@@ -0,0 +1,109 @@
+---
+layout: specification
+title: UAHF Test Plan
+category: spec
+date: 2017-06-14
+activation: 1501590000
+version: 1.0
+---
+
+## Introduction
+
+This document is a rough draft of tests planned for UAHF as described
+in the UAHF Technical Specification [1].
+
+Test cases listed below are currently incomplete w.r.t. the revised
+technical specification - this document is very much under construction.
+
+
+## Functional Tests
+
+### TEST-1
+
+If UAHF is disabled, a large block is considered to break core rules,
+as is presently the case.
+
+
+### TEST-2
+
+If UAHF is disabled, a regular block is accepted at or after the
+activation time (as determined by MTP(block.parent)) without being
+considered invalid.
+
+
+### TEST-3
+
+If enabled, a large block is considered excessive if all blocks
+have time < activation time.
+
+
+### TEST-4
+
+If enabled, a large block B is not considered excessive if
+MTP(B.parent) >= activation time
+
+
+### TEST-5
+
+If enabled, a large block B is not considered excessive if
+MTP(B.parent) < activation time, provided a prior block A has
+MTP(A.parent) >= activation time.
+
+
+### TEST-6
+
+If enabled, a regular block R that is the first such that
+MTP(R.parent) >= activation time is considered invalid (satisfying REQ-3).
+
+
+### TEST-7
+
+If enabled, a regular block R that is not the first such that
+MTP(R.parent) >= activation time is considered valid.
+
+
+### TEST-8
+
+A more-work small-block chain does not get re-orged to from a big-block
+chain after activation has kicked in.
+
+
+### TEST-9
+
+Test that enabling the fork after it was disabled renders invalid a
+small-block chain going past activation that was previously valid while
+the fork was disabled. And vice versa (enabled -> disabled).
+
+
+### TEST-10
+
+If enabled, if a large but < 8MB block is produced, ensure that the
+degenerate case of sigops-heavy instructions does not unduly affect
+validation times above and beyond the standard expected if UAHF
+is not enabled.
+
+
+### TEST-11
+
+Test that linear scaling of 20,000 sigops / MB works for block
+sizes > 1MB (rounding block size up to nearest MB) (ref. BUIP040).
+
+
+### TEST-12
+
+(similar to (TEST-9), but test interoperability of datadir with other
+clients.)
+
+Test what happens when the unmodified BU / Core / other clients are
+used on a datadir where the UAHF client has been run. Should
+test against data from disabled (Core rules data, should be fine),
+and enabled (big block data stored - may need to rebuild DB? or
+provide tool to truncate the data back to pre-fork block?)
+
+
+## References
+
+[1] https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/uahf-technical-spec.md
+
+
+END
