diff --git a/src/rpc/avalanche.cpp b/src/rpc/avalanche.cpp
index d1eefcf66..45fc20cb3 100644
--- a/src/rpc/avalanche.cpp
+++ b/src/rpc/avalanche.cpp
@@ -1,1087 +1,1102 @@
// Copyright (c) 2020 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <avalanche/avalanche.h>
#include <avalanche/delegation.h>
#include <avalanche/delegationbuilder.h>
#include <avalanche/peermanager.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <avalanche/proofbuilder.h>
#include <avalanche/validation.h>
#include <config.h>
#include <core_io.h>
#include <key_io.h>
#include <net_processing.h>
#include <node/context.h>
#include <rpc/blockchain.h>
#include <rpc/server.h>
#include <rpc/util.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <univalue.h>
static RPCHelpMan getavalanchekey() {
return RPCHelpMan{
"getavalanchekey",
"Returns the key used to sign avalanche messages.\n",
{},
RPCResult{RPCResult::Type::STR_HEX, "", ""},
RPCExamples{HelpExampleRpc("getavalanchekey", "")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
return HexStr(g_avalanche->getSessionPubKey());
},
};
}
static CPubKey ParsePubKey(const UniValue ¶m) {
const std::string keyHex = param.get_str();
if ((keyHex.length() != 2 * CPubKey::COMPRESSED_SIZE &&
keyHex.length() != 2 * CPubKey::SIZE) ||
!IsHex(keyHex)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
strprintf("Invalid public key: %s\n", keyHex));
}
return HexToPubKey(keyHex);
}
static bool registerProofIfNeeded(avalanche::ProofRef proof) {
return g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
return pm.getProof(proof->getId()) ||
pm.registerProof(std::move(proof));
});
}
static void verifyDelegationOrThrow(avalanche::Delegation &dg,
const std::string &dgHex, CPubKey &auth) {
bilingual_str error;
if (!avalanche::Delegation::FromHex(dg, dgHex, error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
avalanche::DelegationState state;
if (!dg.verify(state, auth)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The delegation is invalid: " + state.ToString());
}
}
static void verifyProofOrThrow(const NodeContext &node, avalanche::Proof &proof,
const std::string &proofHex) {
bilingual_str error;
if (!avalanche::Proof::FromHex(proof, proofHex, error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
avalanche::ProofValidationState state;
{
LOCK(cs_main);
if (!proof.verify(state, *Assert(node.chainman))) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The proof is invalid: " + state.ToString());
}
}
}
static RPCHelpMan addavalanchenode() {
return RPCHelpMan{
"addavalanchenode",
"Add a node in the set of peers to poll for avalanche.\n",
{
{"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO,
"Node to be added to avalanche."},
{"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The public key of the node."},
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"Proof that the node is not a sybil."},
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"The proof delegation the the node public key"},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the addition succeeded or not."},
RPCExamples{
HelpExampleRpc("addavalanchenode", "5, \"<pubkey>\", \"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params,
{UniValue::VNUM, UniValue::VSTR, UniValue::VSTR});
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
const NodeId nodeid = request.params[0].get_int64();
CPubKey key = ParsePubKey(request.params[1]);
auto proof = RCUPtr<avalanche::Proof>::make();
NodeContext &node = EnsureAnyNodeContext(request.context);
verifyProofOrThrow(node, *proof, request.params[2].get_str());
const avalanche::ProofId &proofid = proof->getId();
if (key != proof->getMaster()) {
if (request.params.size() < 4 || request.params[3].isNull()) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The public key does not match the proof");
}
avalanche::Delegation dg;
CPubKey auth;
verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);
if (dg.getProofId() != proofid) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
"The delegation does not match the proof");
}
if (key != auth) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The public key does not match the delegation");
}
}
if (!registerProofIfNeeded(proof)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"The proof has conflicting utxos");
}
if (!node.connman->ForNode(nodeid, [&](CNode *pnode) {
// FIXME This is not thread safe and might cause issues in
// the unlikely event that the peer sends an avahello
// message at the same time.
if (!pnode->m_avalanche_state) {
pnode->m_avalanche_state =
std::make_unique<CNode::AvalancheState>();
}
pnode->m_avalanche_state->pubkey = std::move(key);
return true;
})) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
strprintf("The node does not exist: %d", nodeid));
}
return g_avalanche->withPeerManager(
[&](avalanche::PeerManager &pm) {
if (!pm.addNode(nodeid, proofid)) {
return false;
}
pm.addUnbroadcastProof(proofid);
return true;
});
},
};
}
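For illustration, a minimal sketch (not part of this patch) of how addavalanchenode might be exercised from the project's functional test framework, mirroring the call made in abc_p2p_compactproofs.py further down; `node` is assumed to be a running TestNode and `nodeid` the id of an already-connected peer, and the helper name is hypothetical.
# Illustrative sketch only; assumes a TestNode `node` and a connected peer id.
from test_framework.avatools import gen_proof

def register_avalanche_node(node, nodeid):
    # Generate a (master key, proof) pair valid for the test chain.
    key, proof = gen_proof(node)
    # The RPC takes the node id, the hex-encoded public key and the
    # hex-encoded proof, and returns True on success.
    return node.addavalanchenode(
        nodeid,
        key.get_pubkey().get_bytes().hex(),
        proof.serialize().hex())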
static RPCHelpMan buildavalancheproof() {
return RPCHelpMan{
"buildavalancheproof",
"Build a proof for avalanche's sybil resistance.\n",
{
{"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The proof's sequence"},
{"expiration", RPCArg::Type::NUM, RPCArg::Optional::NO,
"A timestamp indicating when the proof expire"},
{"master", RPCArg::Type::STR, RPCArg::Optional::NO,
"The master private key in base58-encoding"},
{
"stakes",
RPCArg::Type::ARR,
RPCArg::Optional::NO,
"The stakes to be signed and associated private keys",
{
{
"stake",
RPCArg::Type::OBJ,
RPCArg::Optional::NO,
"A stake to be attached to this proof",
{
{"txid", RPCArg::Type::STR_HEX,
RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The output number"},
{"amount", RPCArg::Type::AMOUNT,
RPCArg::Optional::NO, "The amount in this UTXO"},
{"height", RPCArg::Type::NUM, RPCArg::Optional::NO,
"The height at which this UTXO was mined"},
{"iscoinbase", RPCArg::Type::BOOL,
/* default */ "false",
"Indicate wether the UTXO is a coinbase"},
{"privatekey", RPCArg::Type::STR,
RPCArg::Optional::NO,
"private key in base58-encoding"},
},
},
},
},
{"payoutAddress", RPCArg::Type::STR, RPCArg::Optional::OMITTED,
"A payout address (not required for legacy proofs)"},
},
RPCResult{RPCResult::Type::STR_HEX, "proof",
"A string that is a serialized, hex-encoded proof data."},
RPCExamples{HelpExampleRpc("buildavalancheproof",
"0 1234567800 \"<master>\" []")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VNUM,
UniValue::VSTR, UniValue::VARR});
const uint64_t sequence = request.params[0].get_int64();
const int64_t expiration = request.params[1].get_int64();
CKey masterKey = DecodeSecret(request.params[2].get_str());
if (!masterKey.IsValid()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid master key");
}
CTxDestination payoutAddress = CNoDestination();
if (!avalanche::Proof::useLegacy(gArgs)) {
if (request.params[4].isNull()) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"A payout address is required if "
"`-legacyavaproof` is false");
}
payoutAddress = DecodeDestination(request.params[4].get_str(),
config.GetChainParams());
if (!IsValidDestination(payoutAddress)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Invalid payout address");
}
}
avalanche::ProofBuilder pb(sequence, expiration, masterKey,
GetScriptForDestination(payoutAddress));
const UniValue &stakes = request.params[3].get_array();
for (size_t i = 0; i < stakes.size(); i++) {
const UniValue &stake = stakes[i];
RPCTypeCheckObj(
stake,
{
{"txid", UniValue::VSTR},
{"vout", UniValue::VNUM},
// "amount" is also required but check is done below
// due to UniValue::VNUM erroneously not accepting
// quoted numerics (which are valid JSON)
{"height", UniValue::VNUM},
{"privatekey", UniValue::VSTR},
});
int nOut = find_value(stake, "vout").get_int();
if (nOut < 0) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
"vout cannot be negative");
}
const int height = find_value(stake, "height").get_int();
if (height < 1) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR,
"height must be positive");
}
const TxId txid(ParseHashO(stake, "txid"));
const COutPoint utxo(txid, nOut);
if (!stake.exists("amount")) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Missing amount");
}
const Amount amount =
AmountFromValue(find_value(stake, "amount"));
const UniValue &iscbparam = find_value(stake, "iscoinbase");
const bool iscoinbase =
iscbparam.isNull() ? false : iscbparam.get_bool();
CKey key =
DecodeSecret(find_value(stake, "privatekey").get_str());
if (!key.IsValid()) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Invalid private key");
}
if (!pb.addUTXO(utxo, amount, uint32_t(height), iscoinbase,
std::move(key))) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Duplicated stake");
}
}
const avalanche::ProofRef proof = pb.build();
return proof->ToHex();
},
};
}
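As a hedged sketch of the stake layout documented above, the snippet below builds a proof over RPC from a functional test; the txid, amount and key values are placeholders that must correspond to a real wallet UTXO, and a wallet-enabled node is assumed for getnewaddress.
# Illustrative sketch only; `node` is a TestNode, and the stake values are
# placeholders that must describe an existing UTXO.
def build_example_proof(node, master_wif, stake_txid, stake_privkey_wif):
    stakes = [{
        'txid': stake_txid,        # transaction id of the staked UTXO
        'vout': 0,                 # output index
        'amount': 50000000,        # amount held by the UTXO
        'height': 1,               # block height at which it was mined
        'iscoinbase': False,
        'privatekey': stake_privkey_wif,
    }]
    # sequence=0, no expiration; a payout address is required unless
    # -legacyavaproof is enabled.
    payout_address = node.getnewaddress()
    return node.buildavalancheproof(0, 0, master_wif, stakes, payout_address)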
static RPCHelpMan decodeavalancheproof() {
return RPCHelpMan{
"decodeavalancheproof",
"Convert a serialized, hex-encoded proof, into JSON object. "
"The validity of the proof is not verified.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The proof hex string"},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::NUM, "sequence",
"The proof's sequential number"},
{RPCResult::Type::NUM, "expiration",
"A timestamp indicating when the proof expires"},
{RPCResult::Type::STR_HEX, "master", "The master public key"},
{RPCResult::Type::STR, "signature",
"The proof signature (base64 encoded). Not available when "
"-legacyavaproof is enabled."},
{RPCResult::Type::OBJ,
"payoutscript",
"The proof payout script. Always empty when -legacyavaproof "
"is enabled.",
{
{RPCResult::Type::STR, "asm", "Decoded payout script"},
{RPCResult::Type::STR_HEX, "hex",
"Raw payout script in hex format"},
{RPCResult::Type::STR, "type",
"The output type (e.g. " + GetAllOutputTypes() + ")"},
{RPCResult::Type::NUM, "reqSigs",
"The required signatures"},
{RPCResult::Type::ARR,
"addresses",
"",
{
{RPCResult::Type::STR, "address", "eCash address"},
}},
}},
{RPCResult::Type::STR_HEX, "limitedid",
"A hash of the proof data excluding the master key."},
{RPCResult::Type::STR_HEX, "proofid",
"A hash of the limitedid and master key."},
{RPCResult::Type::STR_AMOUNT, "staked_amount",
"The total staked amount of this proof in " +
Currency::get().ticker + "."},
{RPCResult::Type::NUM, "score", "The score of this proof."},
{RPCResult::Type::ARR,
"stakes",
"",
{
{RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::STR_HEX, "txid",
"The transaction id"},
{RPCResult::Type::NUM, "vout", "The output number"},
{RPCResult::Type::STR_AMOUNT, "amount",
"The amount in this UTXO"},
{RPCResult::Type::NUM, "height",
"The height at which this UTXO was mined"},
{RPCResult::Type::BOOL, "iscoinbase",
"Indicate whether the UTXO is a coinbase"},
{RPCResult::Type::STR_HEX, "pubkey",
"This UTXO's public key"},
{RPCResult::Type::STR, "signature",
"Signature of the proofid with this UTXO's private "
"key (base64 encoded)"},
}},
}},
}},
RPCExamples{HelpExampleCli("decodeavalancheproof", "\"<hex proof>\"") +
HelpExampleRpc("decodeavalancheproof", "\"<hex proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VSTR});
avalanche::Proof proof;
bilingual_str error;
if (!avalanche::Proof::FromHex(proof, request.params[0].get_str(),
error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
UniValue result(UniValue::VOBJ);
result.pushKV("sequence", proof.getSequence());
result.pushKV("expiration", proof.getExpirationTime());
result.pushKV("master", HexStr(proof.getMaster()));
const auto signature = proof.getSignature();
if (signature) {
result.pushKV("signature", EncodeBase64(*signature));
}
const auto payoutScript = proof.getPayoutScript();
UniValue payoutScriptObj(UniValue::VOBJ);
ScriptPubKeyToUniv(payoutScript, payoutScriptObj,
/* fIncludeHex */ true);
result.pushKV("payoutscript", payoutScriptObj);
result.pushKV("limitedid", proof.getLimitedId().ToString());
result.pushKV("proofid", proof.getId().ToString());
result.pushKV("staked_amount", proof.getStakedAmount());
result.pushKV("score", uint64_t(proof.getScore()));
UniValue stakes(UniValue::VARR);
for (const avalanche::SignedStake &s : proof.getStakes()) {
const COutPoint &utxo = s.getStake().getUTXO();
UniValue stake(UniValue::VOBJ);
stake.pushKV("txid", utxo.GetTxId().ToString());
stake.pushKV("vout", uint64_t(utxo.GetN()));
stake.pushKV("amount", s.getStake().getAmount());
stake.pushKV("height", uint64_t(s.getStake().getHeight()));
stake.pushKV("iscoinbase", s.getStake().isCoinbase());
stake.pushKV("pubkey", HexStr(s.getStake().getPubkey()));
// Only PKHash destination is supported, so this is safe
stake.pushKV("address",
EncodeDestination(PKHash(s.getStake().getPubkey()),
config));
stake.pushKV("signature", EncodeBase64(s.getSignature()));
stakes.push_back(stake);
}
result.pushKV("stakes", stakes);
return result;
},
};
}
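For reference, a short sketch showing how the decoded fields listed above might be inspected from a test; `proof_hex` is assumed to come from buildavalancheproof or gen_proof, and summarize_proof is a hypothetical helper.
# Illustrative sketch only; `node` is a TestNode.
def summarize_proof(node, proof_hex):
    decoded = node.decodeavalancheproof(proof_hex)
    # Decoding only parses the structure; it does not verify validity.
    return {
        'proofid': decoded['proofid'],
        'limitedid': decoded['limitedid'],
        'score': decoded['score'],
        'staked_amount': decoded['staked_amount'],
        'stake_count': len(decoded['stakes']),
    }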
static RPCHelpMan delegateavalancheproof() {
return RPCHelpMan{
"delegateavalancheproof",
"Delegate the avalanche proof to another public key.\n",
{
{"limitedproofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The limited id of the proof to be delegated."},
{"privatekey", RPCArg::Type::STR, RPCArg::Optional::NO,
"The private key in base58-encoding. Must match the proof master "
"public key or the upper level parent delegation public key if "
" supplied."},
{"publickey", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The public key to delegate the proof to."},
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"A string that is the serialized, hex-encoded delegation for the "
"proof and which is a parent for the delegation to build."},
},
RPCResult{RPCResult::Type::STR_HEX, "delegation",
"A string that is a serialized, hex-encoded delegation."},
RPCExamples{
HelpExampleRpc("delegateavalancheproof",
"\"<limitedproofid>\" \"<privkey>\" \"<pubkey>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params,
{UniValue::VSTR, UniValue::VSTR, UniValue::VSTR});
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
avalanche::LimitedProofId limitedProofId{
ParseHashV(request.params[0], "limitedproofid")};
const CKey privkey = DecodeSecret(request.params[1].get_str());
if (!privkey.IsValid()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY,
"The private key is invalid");
}
const CPubKey pubkey = ParsePubKey(request.params[2]);
std::unique_ptr<avalanche::DelegationBuilder> dgb;
if (request.params.size() >= 4 && !request.params[3].isNull()) {
avalanche::Delegation dg;
CPubKey auth;
verifyDelegationOrThrow(dg, request.params[3].get_str(), auth);
if (dg.getProofId() !=
limitedProofId.computeProofId(dg.getProofMaster())) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
"The delegation does not match the proof");
}
if (privkey.GetPubKey() != auth) {
throw JSONRPCError(
RPC_INVALID_ADDRESS_OR_KEY,
"The private key does not match the delegation");
}
dgb = std::make_unique<avalanche::DelegationBuilder>(dg);
} else {
dgb = std::make_unique<avalanche::DelegationBuilder>(
limitedProofId, privkey.GetPubKey());
}
if (!dgb->addLevel(privkey, pubkey)) {
throw JSONRPCError(RPC_MISC_ERROR,
"Unable to build the delegation");
}
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << dgb->build();
return HexStr(ss);
},
};
}
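A hedged sketch of chaining two delegation levels with this RPC; the key and public key arguments are placeholders, and the limited proof id would normally come from decodeavalancheproof.
# Illustrative sketch only; `node` is a TestNode. `master_wif` must match the
# proof master key; the level1/level2 keys are hypothetical delegated keys.
def delegate_twice(node, limited_proofid, master_wif,
                   level1_pubkey_hex, level1_wif, level2_pubkey_hex):
    # First level: the master key delegates to the level1 public key.
    dg1 = node.delegateavalancheproof(
        limited_proofid, master_wif, level1_pubkey_hex)
    # Second level: the previous delegation is passed as the optional fourth
    # argument and must be signed with the level1 private key.
    return node.delegateavalancheproof(
        limited_proofid, level1_wif, level2_pubkey_hex, dg1)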
static RPCHelpMan decodeavalanchedelegation() {
return RPCHelpMan{
"decodeavalanchedelegation",
"Convert a serialized, hex-encoded avalanche proof delegation, into "
"JSON object. \n"
"The validity of the delegation is not verified.\n",
{
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The delegation hex string"},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::STR_HEX, "pubkey",
"The public key the proof is delegated to."},
{RPCResult::Type::STR_HEX, "proofmaster",
"The delegated proof master public key."},
{RPCResult::Type::STR_HEX, "delegationid",
"The identifier of this delegation."},
{RPCResult::Type::STR_HEX, "limitedid",
"A delegated proof data hash excluding the master key."},
{RPCResult::Type::STR_HEX, "proofid",
"A hash of the delegated proof limitedid and master key."},
{RPCResult::Type::NUM, "depth",
"The number of delegation levels."},
{RPCResult::Type::ARR,
"levels",
"",
{
{RPCResult::Type::OBJ,
"",
"",
{
{RPCResult::Type::NUM, "index",
"The index of this delegation level."},
{RPCResult::Type::STR_HEX, "pubkey",
"This delegated public key for this level"},
{RPCResult::Type::STR, "signature",
"Signature of this delegation level (base64 "
"encoded)"},
}},
}},
}},
RPCExamples{HelpExampleCli("decodeavalanchedelegation",
"\"<hex delegation>\"") +
HelpExampleRpc("decodeavalanchedelegation",
"\"<hex delegation>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VSTR});
avalanche::Delegation delegation;
bilingual_str error;
if (!avalanche::Delegation::FromHex(
delegation, request.params[0].get_str(), error)) {
throw JSONRPCError(RPC_DESERIALIZATION_ERROR, error.original);
}
UniValue result(UniValue::VOBJ);
result.pushKV("pubkey", HexStr(delegation.getDelegatedPubkey()));
result.pushKV("proofmaster", HexStr(delegation.getProofMaster()));
result.pushKV("delegationid", delegation.getId().ToString());
result.pushKV("limitedid",
delegation.getLimitedProofId().ToString());
result.pushKV("proofid", delegation.getProofId().ToString());
auto levels = delegation.getLevels();
result.pushKV("depth", uint64_t(levels.size()));
UniValue levelsArray(UniValue::VARR);
for (auto &level : levels) {
UniValue obj(UniValue::VOBJ);
obj.pushKV("pubkey", HexStr(level.pubkey));
obj.pushKV("signature", EncodeBase64(level.sig));
levelsArray.push_back(std::move(obj));
}
result.pushKV("levels", levelsArray);
return result;
},
};
}
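As with proofs, a quick sketch (an assumed helper, not from the patch) of reading a decoded delegation; `delegation_hex` would typically be the output of delegateavalancheproof.
# Illustrative sketch only; `node` is a TestNode.
def delegation_depth(node, delegation_hex):
    decoded = node.decodeavalanchedelegation(delegation_hex)
    # 'depth' counts the delegation levels; 'pubkey' is the final
    # delegated public key.
    return decoded['depth'], decoded['pubkey']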
static RPCHelpMan getavalancheinfo() {
return RPCHelpMan{
"getavalancheinfo",
"Returns an object containing various state info regarding avalanche "
"networking.\n",
{},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{
- {RPCResult::Type::BOOL, "active",
+ {RPCResult::Type::BOOL, "ready_to_poll",
"Whether the node is ready to start polling and voting."},
{RPCResult::Type::OBJ,
"local",
"Only available if -avaproof has been supplied to the node",
{
- {RPCResult::Type::BOOL, "live",
- "Whether the node local proof has been verified or not."},
+ {RPCResult::Type::BOOL, "verified",
+ "Whether the node local proof has been locally verified "
+ "or not."},
{RPCResult::Type::STR_HEX, "proofid",
"The node local proof id."},
{RPCResult::Type::STR_HEX, "limited_proofid",
"The node local limited proof id."},
{RPCResult::Type::STR_HEX, "master",
"The node local proof master public key."},
{RPCResult::Type::STR, "payout_address",
"The node local proof payout address. This might be "
"omitted if the payout script is not one of P2PK, P2PKH "
"or P2SH, in which case decodeavalancheproof can be used "
"to get more details."},
{RPCResult::Type::STR_AMOUNT, "stake_amount",
"The node local proof staked amount."},
}},
{RPCResult::Type::OBJ,
"network",
"",
{
{RPCResult::Type::NUM, "proof_count",
- "The number of valid avalanche proofs we know exist."},
+ "The number of valid avalanche proofs we know exist "
+ "(including this node's local proof if applicable)."},
{RPCResult::Type::NUM, "connected_proof_count",
"The number of avalanche proofs with at least one node "
- "we are connected to."},
+ "we are connected to (including this node's local proof "
+ "if applicable)."},
+ {RPCResult::Type::NUM, "dangling_proof_count",
+ "The number of avalanche proofs with no node attached."},
{RPCResult::Type::NUM, "finalized_proof_count",
"The number of known avalanche proofs that have been "
"finalized by avalanche."},
{RPCResult::Type::NUM, "conflicting_proof_count",
"The number of known avalanche proofs that conflict with "
"valid proofs."},
- {RPCResult::Type::NUM, "orphan_proof_count",
- "The number of known avalanche proofs that are "
- "orphaned."},
+ {RPCResult::Type::NUM, "immature_proof_count",
+ "The number of known avalanche proofs that have immature "
+ "utxos."},
{RPCResult::Type::STR_AMOUNT, "total_stake_amount",
"The total staked amount over all the valid proofs in " +
- Currency::get().ticker + "."},
+ Currency::get().ticker +
+ " (including this node's local proof if "
+ "applicable)."},
{RPCResult::Type::STR_AMOUNT, "connected_stake_amount",
"The total staked amount over all the connected proofs "
"in " +
- Currency::get().ticker + "."},
+ Currency::get().ticker +
+ " (including this node's local proof if "
+ "applicable)."},
+ {RPCResult::Type::STR_AMOUNT, "dangling_stake_amount",
+ "The total staked amount over all the dangling proofs "
+ "in " +
+ Currency::get().ticker +
+ " (including this node's local proof if "
+ "applicable)."},
{RPCResult::Type::NUM, "node_count",
- "The number of avalanche nodes we are connected to."},
+ "The number of avalanche nodes we are connected to "
+ "(including this node if a local proof is set)."},
{RPCResult::Type::NUM, "connected_node_count",
"The number of avalanche nodes associated with an "
- "avalanche proof."},
+ "avalanche proof (including this node if a local proof "
+ "is set)."},
{RPCResult::Type::NUM, "pending_node_count",
"The number of avalanche nodes pending for a proof."},
}},
},
},
RPCExamples{HelpExampleCli("getavalancheinfo", "") +
HelpExampleRpc("getavalancheinfo", "")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
UniValue ret(UniValue::VOBJ);
- ret.pushKV("active", g_avalanche->isQuorumEstablished());
+ ret.pushKV("ready_to_poll", g_avalanche->isQuorumEstablished());
auto localProof = g_avalanche->getLocalProof();
if (localProof != nullptr) {
UniValue local(UniValue::VOBJ);
- local.pushKV("live", g_avalanche->withPeerManager(
- [&](const avalanche::PeerManager &pm) {
- return pm.isBoundToPeer(
- localProof->getId());
- }));
+ local.pushKV("verified",
+ g_avalanche->withPeerManager(
+ [&](const avalanche::PeerManager &pm) {
+ return pm.isBoundToPeer(
+ localProof->getId());
+ }));
local.pushKV("proofid", localProof->getId().ToString());
local.pushKV("limited_proofid",
localProof->getLimitedId().ToString());
local.pushKV("master", HexStr(localProof->getMaster()));
CTxDestination destination;
if (ExtractDestination(localProof->getPayoutScript(),
destination)) {
local.pushKV("payout_address",
EncodeDestination(destination, config));
}
local.pushKV("stake_amount", localProof->getStakedAmount());
ret.pushKV("local", local);
}
g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
UniValue network(UniValue::VOBJ);
uint64_t proofCount{0};
uint64_t connectedProofCount{0};
uint64_t finalizedProofCount{0};
uint64_t connectedNodeCount{0};
Amount totalStakes = Amount::zero();
Amount connectedStakes = Amount::zero();
pm.forEachPeer([&](const avalanche::Peer &peer) {
CHECK_NONFATAL(peer.proof != nullptr);
- // Don't account for our local proof here
- if (peer.proof == localProof) {
- return;
- }
+ const bool isLocalProof = peer.proof == localProof;
+ ++proofCount;
const Amount proofStake = peer.proof->getStakedAmount();
- ++proofCount;
totalStakes += proofStake;
if (peer.hasFinalized) {
++finalizedProofCount;
}
- if (peer.node_count > 0) {
+ if (peer.node_count > 0 || isLocalProof) {
++connectedProofCount;
connectedStakes += proofStake;
}
- connectedNodeCount += peer.node_count;
+ connectedNodeCount += peer.node_count + isLocalProof;
});
network.pushKV("proof_count", proofCount);
network.pushKV("connected_proof_count", connectedProofCount);
network.pushKV("dangling_proof_count",
proofCount - connectedProofCount);
network.pushKV("finalized_proof_count", finalizedProofCount);
network.pushKV("conflicting_proof_count",
uint64_t(pm.getConflictingProofCount()));
- network.pushKV("orphan_proof_count",
+ network.pushKV("immature_proof_count",
uint64_t(pm.getOrphanProofCount()));
network.pushKV("total_stake_amount", totalStakes);
network.pushKV("connected_stake_amount", connectedStakes);
network.pushKV("dangling_stake_amount",
totalStakes - connectedStakes);
const uint64_t pendingNodes = pm.getPendingNodeCount();
network.pushKV("node_count", connectedNodeCount + pendingNodes);
network.pushKV("connected_node_count", connectedNodeCount);
network.pushKV("pending_node_count", pendingNodes);
ret.pushKV("network", network);
});
return ret;
},
};
}
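Since this patch renames several getavalancheinfo fields (active becomes ready_to_poll, live becomes verified, orphan_proof_count becomes immature_proof_count), here is a sketch of how a functional test might read the updated layout; `node` and `log` are assumed to come from the test framework.
# Illustrative sketch only; field names follow the layout introduced by
# this diff.
def log_avalanche_state(node, log):
    info = node.getavalancheinfo()
    log.info(f"ready_to_poll: {info['ready_to_poll']}")  # formerly 'active'
    if 'local' in info:
        # 'verified' replaces the old 'live' flag for the local proof.
        log.info(f"local proof verified: {info['local']['verified']}")
    network = info['network']
    log.info(f"proofs: {network['proof_count']} "
             f"(immature: {network['immature_proof_count']}, "
             f"dangling: {network['dangling_proof_count']})")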
static RPCHelpMan getavalanchepeerinfo() {
return RPCHelpMan{
"getavalanchepeerinfo",
"Returns data about an avalanche peer as a json array of objects. If "
"no proofid is provided, returns data about all the peers.\n",
{
{"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED,
"The hex encoded avalanche proof identifier."},
},
RPCResult{
RPCResult::Type::ARR,
"",
"",
{{
RPCResult::Type::OBJ,
"",
"",
{{
{RPCResult::Type::NUM, "avalanche_peerid",
"The avalanche internal peer identifier"},
{RPCResult::Type::STR_HEX, "proofid",
"The avalanche proof id used by this peer"},
{RPCResult::Type::STR_HEX, "proof",
"The avalanche proof used by this peer"},
{RPCResult::Type::NUM, "nodecount",
"The number of nodes for this peer"},
{RPCResult::Type::ARR,
"node_list",
"",
{
{RPCResult::Type::NUM, "nodeid",
"Node id, as returned by getpeerinfo"},
}},
}},
}},
},
RPCExamples{HelpExampleCli("getavalanchepeerinfo", "") +
HelpExampleCli("getavalanchepeerinfo", "\"proofid\"") +
HelpExampleRpc("getavalanchepeerinfo", "") +
HelpExampleRpc("getavalanchepeerinfo", "\"proofid\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VSTR});
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
auto peerToUniv = [](const avalanche::PeerManager &pm,
const avalanche::Peer &peer) {
UniValue obj(UniValue::VOBJ);
obj.pushKV("avalanche_peerid", uint64_t(peer.peerid));
obj.pushKV("proofid", peer.getProofId().ToString());
obj.pushKV("proof", peer.proof->ToHex());
UniValue nodes(UniValue::VARR);
pm.forEachNode(peer, [&](const avalanche::Node &n) {
nodes.push_back(n.nodeid);
});
obj.pushKV("nodecount", uint64_t(peer.node_count));
obj.pushKV("node_list", nodes);
return obj;
};
UniValue ret(UniValue::VARR);
g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
// If a proofid is provided, only return the associated peer
if (!request.params[0].isNull()) {
const avalanche::ProofId proofid =
avalanche::ProofId::fromHex(
request.params[0].get_str());
if (!pm.isBoundToPeer(proofid)) {
throw JSONRPCError(RPC_INVALID_PARAMETER,
"Proofid not found");
}
pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
return ret.push_back(peerToUniv(pm, peer));
});
return;
}
// If no proofid is provided, return all the peers
pm.forEachPeer([&](const avalanche::Peer &peer) {
ret.push_back(peerToUniv(pm, peer));
});
});
return ret;
},
};
}
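A small sketch of consuming getavalanchepeerinfo, matching the result fields above and the nodecount check used later in abc_p2p_getavaaddr.py; the helper name is hypothetical.
# Illustrative sketch only; `node` is a TestNode.
def count_avalanche_nodes(node, proofid_hex=None):
    # With no argument the RPC returns every peer; with a proofid it returns
    # only the matching peer, or raises an error if the proofid is unknown.
    peers = (node.getavalanchepeerinfo(proofid_hex) if proofid_hex
             else node.getavalanchepeerinfo())
    return sum(peer['nodecount'] for peer in peers)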
static RPCHelpMan getrawavalancheproof() {
return RPCHelpMan{
"getrawavalancheproof",
"Lookup for a known avalanche proof by id.\n",
{
{"proofid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The hex encoded avalanche proof identifier."},
},
RPCResult{
RPCResult::Type::OBJ,
"",
"",
{{
{RPCResult::Type::STR_HEX, "proof",
"The hex encoded proof matching the identifier."},
{RPCResult::Type::BOOL, "orphan",
"Whether the proof is an orphan."},
{RPCResult::Type::BOOL, "boundToPeer",
"Whether the proof is bound to an avalanche peer."},
{RPCResult::Type::BOOL, "conflicting",
"Whether the proof has a conflicting UTXO with an avalanche "
"peer."},
{RPCResult::Type::BOOL, "finalized",
"Whether the proof is finalized by vote."},
}},
},
RPCExamples{HelpExampleRpc("getrawavalancheproof", "<proofid>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
const avalanche::ProofId proofid =
avalanche::ProofId::fromHex(request.params[0].get_str());
bool isOrphan = false;
bool isBoundToPeer = false;
bool conflicting = false;
bool finalized = false;
auto proof = g_avalanche->withPeerManager(
[&](const avalanche::PeerManager &pm) {
isOrphan = pm.isOrphan(proofid);
isBoundToPeer = pm.isBoundToPeer(proofid);
conflicting = pm.isInConflictingPool(proofid);
finalized =
pm.forPeer(proofid, [&](const avalanche::Peer &p) {
return p.hasFinalized;
});
return pm.getProof(proofid);
});
if (!proof) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Proof not found");
}
UniValue ret(UniValue::VOBJ);
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << *proof;
ret.pushKV("proof", HexStr(ss));
ret.pushKV("orphan", isOrphan);
ret.pushKV("boundToPeer", isBoundToPeer);
ret.pushKV("conflicting", conflicting);
ret.pushKV("finalized", finalized);
return ret;
},
};
}
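A sketch of polling getrawavalancheproof until the proof is bound to a peer; the polling loop is a simplification and assumes the proof id is already known to the node.
# Illustrative sketch only; `node` is a TestNode and `proofid_hex` is the
# 64-character hex proof identifier.
import time

def wait_until_bound(node, proofid_hex, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = node.getrawavalancheproof(proofid_hex)
        if status['boundToPeer'] and not status['orphan']:
            return status['proof']  # hex-encoded serialized proof
        time.sleep(0.5)
    raise AssertionError(f"Proof {proofid_hex} was never bound to a peer")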
static RPCHelpMan sendavalancheproof() {
return RPCHelpMan{
"sendavalancheproof",
"Broadcast an avalanche proof.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The avalanche proof to broadcast."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the proof was sent successfully or not."},
RPCExamples{HelpExampleRpc("sendavalancheproof", "<proof>")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
if (!g_avalanche) {
throw JSONRPCError(RPC_INTERNAL_ERROR,
"Avalanche is not initialized");
}
auto proof = RCUPtr<avalanche::Proof>::make();
NodeContext &node = EnsureAnyNodeContext(request.context);
// Verify the proof. Note that this is redundant with the
// verification done when adding the proof to the pool, but we get a
// chance to give a better error message.
verifyProofOrThrow(node, *proof, request.params[0].get_str());
// Add the proof to the pool if we don't have it already. Since the
// proof verification has already been done, a failure likely
// indicates that there already is a proof with conflicting utxos.
const avalanche::ProofId &proofid = proof->getId();
if (!registerProofIfNeeded(proof)) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
"The proof has conflicting utxo with an existing proof");
}
g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
pm.addUnbroadcastProof(proofid);
});
RelayProof(proofid, *node.connman);
return true;
},
};
}
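Tying the pieces together, a sketch that generates, broadcasts and waits for a proof, following the same pattern the functional tests below use (gen_proof, sendavalancheproof, wait_for_proof from test_framework.avatools); `node` is assumed to have avalanche enabled.
# Illustrative sketch only; `node` is a TestNode with avalanche enabled.
from test_framework.avatools import gen_proof, wait_for_proof

def broadcast_new_proof(node):
    _, proof = gen_proof(node)
    # sendavalancheproof verifies, registers and relays the proof; it raises
    # a JSON-RPC error if the proof conflicts with an existing one.
    assert node.sendavalancheproof(proof.serialize().hex())
    # Wait until the proof shows up in the node's proof pool.
    wait_for_proof(node, f"{proof.proofid:0{64}x}")
    return proof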
static RPCHelpMan verifyavalancheproof() {
return RPCHelpMan{
"verifyavalancheproof",
"Verify an avalanche proof is valid and return the error otherwise.\n",
{
{"proof", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"Proof to verify."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the proof is valid or not."},
RPCExamples{HelpExampleRpc("verifyavalancheproof", "\"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VSTR});
avalanche::Proof proof;
verifyProofOrThrow(EnsureAnyNodeContext(request.context), proof,
request.params[0].get_str());
return true;
},
};
}
static RPCHelpMan verifyavalanchedelegation() {
return RPCHelpMan{
"verifyavalanchedelegation",
"Verify an avalanche delegation is valid and return the error "
"otherwise.\n",
{
{"delegation", RPCArg::Type::STR_HEX, RPCArg::Optional::NO,
"The avalanche proof delegation to verify."},
},
RPCResult{RPCResult::Type::BOOL, "success",
"Whether the delegation is valid or not."},
RPCExamples{HelpExampleRpc("verifyavalanchedelegation", "\"<proof>\"")},
[&](const RPCHelpMan &self, const Config &config,
const JSONRPCRequest &request) -> UniValue {
RPCTypeCheck(request.params, {UniValue::VSTR});
avalanche::Delegation delegation;
CPubKey dummy;
verifyDelegationOrThrow(delegation, request.params[0].get_str(),
dummy);
return true;
},
};
}
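Finally, a short sketch of using the two verification RPCs from a test; both return True for valid inputs and raise a JSON-RPC error describing the failure otherwise. The hex arguments are assumed to be well-formed serializations.
# Illustrative sketch only; `node` is a TestNode.
def check_verification(node, proof_hex, delegation_hex):
    # Each call returns True when valid and throws otherwise.
    assert node.verifyavalancheproof(proof_hex)
    assert node.verifyavalanchedelegation(delegation_hex)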
void RegisterAvalancheRPCCommands(CRPCTable &t) {
// clang-format off
static const CRPCCommand commands[] = {
// category actor (function)
// ----------------- --------------------
{ "avalanche", getavalanchekey, },
{ "avalanche", addavalanchenode, },
{ "avalanche", buildavalancheproof, },
{ "avalanche", decodeavalancheproof, },
{ "avalanche", delegateavalancheproof, },
{ "avalanche", decodeavalanchedelegation, },
{ "avalanche", getavalancheinfo, },
{ "avalanche", getavalanchepeerinfo, },
{ "avalanche", getrawavalancheproof, },
{ "avalanche", sendavalancheproof, },
{ "avalanche", verifyavalancheproof, },
{ "avalanche", verifyavalanchedelegation, },
};
// clang-format on
for (const auto &c : commands) {
t.appendCommand(c.name, &c);
}
}
diff --git a/test/functional/abc_p2p_compactproofs.py b/test/functional/abc_p2p_compactproofs.py
index 21df2d621..8661a873e 100644
--- a/test/functional/abc_p2p_compactproofs.py
+++ b/test/functional/abc_p2p_compactproofs.py
@@ -1,680 +1,681 @@
#!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test proof inventory relaying
"""
import random
import time
from test_framework.avatools import (
AvaP2PInterface,
NoHandshakeAvaP2PInterface,
build_msg_avaproofs,
gen_proof,
get_ava_p2p_interface,
get_proof_ids,
wait_for_proof,
)
from test_framework.messages import (
NODE_AVALANCHE,
NODE_NETWORK,
AvalanchePrefilledProof,
calculate_shortid,
msg_avaproofsreq,
msg_getavaproofs,
)
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import MAX_NODES, assert_equal, p2p_port
# Timeout after which the proofs can be cleaned up
AVALANCHE_AVAPROOFS_TIMEOUT = 2 * 60
# Max interval between 2 periodic networking processing
AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL = 5 * 60
class ProofStoreP2PInterface(AvaP2PInterface):
def __init__(self):
self.proofs = []
super().__init__()
def on_avaproof(self, message):
self.proofs.append(message.proof)
def get_proofs(self):
with p2p_lock:
return self.proofs
class CompactProofsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[
'-enableavalanche=1',
'-avaproofstakeutxoconfirmations=1',
'-avacooldown=0',
'-enableavalanchepeerdiscovery=1',
]] * self.num_nodes
def setup_network(self):
# Don't connect the nodes
self.setup_nodes()
@staticmethod
def received_avaproofs(peer):
with p2p_lock:
return peer.last_message.get("avaproofs")
def test_send_outbound_getavaproofs(self):
self.log.info(
"Check we send a getavaproofs message to our avalanche outbound peers")
node = self.nodes[0]
p2p_idx = 0
non_avapeers = []
for _ in range(4):
peer = P2PInterface()
node.add_outbound_p2p_connection(
peer,
p2p_idx=p2p_idx,
connection_type="outbound-full-relay",
services=NODE_NETWORK,
)
non_avapeers.append(peer)
p2p_idx += 1
inbound_avapeers = [
node.add_p2p_connection(
NoHandshakeAvaP2PInterface()) for _ in range(4)]
outbound_avapeers = []
# With a proof and the service bit set
for _ in range(4):
peer = AvaP2PInterface(node)
node.add_outbound_p2p_connection(
peer,
p2p_idx=p2p_idx,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
outbound_avapeers.append(peer)
p2p_idx += 1
# Without a proof and no service bit set
for _ in range(4):
peer = AvaP2PInterface()
node.add_outbound_p2p_connection(
peer,
p2p_idx=p2p_idx,
connection_type="outbound-full-relay",
services=NODE_NETWORK,
)
outbound_avapeers.append(peer)
p2p_idx += 1
def all_peers_received_getavaproofs():
with p2p_lock:
return all([p.last_message.get("getavaproofs")
for p in outbound_avapeers])
self.wait_until(all_peers_received_getavaproofs)
with p2p_lock:
assert all([p.message_count.get(
"getavaproofs", 0) >= 1 for p in outbound_avapeers])
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in non_avapeers])
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in inbound_avapeers])
self.log.info(
"Check we send periodic getavaproofs message to some of our peers")
def count_outbounds_getavaproofs():
with p2p_lock:
return sum([p.message_count.get("getavaproofs", 0)
for p in outbound_avapeers])
outbounds_getavaproofs = count_outbounds_getavaproofs()
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
self.wait_until(lambda: count_outbounds_getavaproofs()
== outbounds_getavaproofs + 3)
outbounds_getavaproofs += 3
with p2p_lock:
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in non_avapeers])
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in inbound_avapeers])
self.log.info(
"After the first avaproofs has been received, all the peers are requested periodically")
responding_outbound_avapeer = AvaP2PInterface(node)
node.add_outbound_p2p_connection(
responding_outbound_avapeer,
p2p_idx=p2p_idx,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
p2p_idx += 1
responding_outbound_avapeer_id = node.getpeerinfo()[-1]['id']
outbound_avapeers.append(responding_outbound_avapeer)
self.wait_until(all_peers_received_getavaproofs)
# Register as an avalanche node for the avaproofs message to be counted
key, proof = gen_proof(node)
assert node.addavalanchenode(
responding_outbound_avapeer_id,
key.get_pubkey().get_bytes().hex(),
proof.serialize().hex())
# Send the avaproofs message
avaproofs = build_msg_avaproofs([proof])
responding_outbound_avapeer.send_and_ping(avaproofs)
# Now the node will request from all its peers at each time period
outbounds_getavaproofs = count_outbounds_getavaproofs()
num_outbound_avapeers = len(outbound_avapeers)
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
self.wait_until(lambda: count_outbounds_getavaproofs()
== outbounds_getavaproofs + num_outbound_avapeers)
outbounds_getavaproofs += num_outbound_avapeers
for p in outbound_avapeers:
with node.assert_debug_log(["received: avaproofs"], ["Ignoring unsollicited avaproofs"]):
p.send_message(build_msg_avaproofs([]))
with p2p_lock:
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in non_avapeers])
assert all([p.message_count.get(
"getavaproofs", 0) == 0 for p in inbound_avapeers])
def test_send_manual_getavaproofs(self):
self.log.info(
"Check we send a getavaproofs message to our manually connected peers that support avalanche")
node = self.nodes[0]
# Get rid of previously connected nodes
node.disconnect_p2ps()
def added_node_connected(ip_port):
added_node_info = node.getaddednodeinfo(ip_port)
return len(
added_node_info) == 1 and added_node_info[0]['connected']
def connect_callback(address, port):
self.log.debug("Connecting to {}:{}".format(address, port))
p = AvaP2PInterface(node)
p2p_idx = 1
p.peer_accept_connection(
connect_cb=connect_callback,
connect_id=p2p_idx,
net=node.chain,
timeout_factor=node.timeout_factor,
services=NODE_NETWORK | NODE_AVALANCHE,
)()
ip_port = f"127.0.01:{p2p_port(MAX_NODES - p2p_idx)}"
node.addnode(node=ip_port, command="add")
self.wait_until(lambda: added_node_connected(ip_port))
assert_equal(node.getpeerinfo()[-1]['addr'], ip_port)
assert_equal(node.getpeerinfo()[-1]['connection_type'], 'manual')
p.wait_until(lambda: p.last_message.get("getavaproofs"))
def test_respond_getavaproofs(self):
self.log.info("Check the node responds to getavaproofs messages")
node = self.nodes[0]
def send_getavaproof_check_shortid_len(peer, expected_len):
peer.send_message(msg_getavaproofs())
self.wait_until(lambda: self.received_avaproofs(peer))
avaproofs = self.received_avaproofs(peer)
assert_equal(len(avaproofs.shortids), expected_len)
# Initially the node has no peers
self.restart_node(0)
assert_equal(len(get_proof_ids(node)), 0)
peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface())
send_getavaproof_check_shortid_len(peer, 0)
# Add some proofs
sending_peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface())
for _ in range(50):
_, proof = gen_proof(node)
sending_peer.send_avaproof(proof)
wait_for_proof(node, f"{proof.proofid:0{64}x}")
proofids = get_proof_ids(node)
assert_equal(len(proofids), 50)
receiving_peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface())
send_getavaproof_check_shortid_len(receiving_peer, len(proofids))
avaproofs = self.received_avaproofs(receiving_peer)
expected_shortids = [
calculate_shortid(
avaproofs.key0,
avaproofs.key1,
proofid) for proofid in sorted(proofids)]
assert_equal(expected_shortids, avaproofs.shortids)
# Don't expect any prefilled proof for now
assert_equal(len(avaproofs.prefilled_proofs), 0)
def test_request_missing_proofs(self):
self.log.info(
"Check the node requests the missing proofs after receiving an avaproofs message")
node = self.nodes[0]
self.restart_node(0)
key0 = random.randint(0, 2**64 - 1)
key1 = random.randint(0, 2**64 - 1)
proofs = [gen_proof(node)[1] for _ in range(10)]
# Build a map from proofid to shortid. Use sorted proofids so we don't
# have the same indices as the `proofs` list.
proofids = [p.proofid for p in proofs]
shortid_map = {}
for proofid in sorted(proofids):
shortid_map[proofid] = calculate_shortid(key0, key1, proofid)
self.log.info("The node ignores unsollicited avaproofs")
spam_peer = get_ava_p2p_interface(node)
msg = build_msg_avaproofs(
proofs, prefilled_proofs=[], key_pair=[
key0, key1])
with node.assert_debug_log(["Ignoring unsollicited avaproofs"]):
spam_peer.send_message(msg)
def received_avaproofsreq(peer):
with p2p_lock:
return peer.last_message.get("avaproofsreq")
p2p_idx = 0
def add_avalanche_p2p_outbound():
nonlocal p2p_idx
peer = AvaP2PInterface(node)
node.add_outbound_p2p_connection(
peer,
p2p_idx=p2p_idx,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
p2p_idx += 1
peer.wait_until(lambda: peer.last_message.get("getavaproofs"))
return peer
def expect_indices(shortids, expected_indices, prefilled_proofs=None):
nonlocal p2p_idx
msg = build_msg_avaproofs(
[], prefilled_proofs=prefilled_proofs, key_pair=[
key0, key1])
msg.shortids = shortids
peer = add_avalanche_p2p_outbound()
peer.send_message(msg)
self.wait_until(lambda: received_avaproofsreq(peer))
avaproofsreq = received_avaproofsreq(peer)
assert_equal(avaproofsreq.indices, expected_indices)
self.log.info("Check no proof is requested if there is no shortid")
msg = build_msg_avaproofs([])
sender = add_avalanche_p2p_outbound()
with node.assert_debug_log(["Got an avaproofs message with no shortid"]):
sender.send_message(msg)
# Make sure we don't get an avaproofsreq message
sender.sync_send_with_ping()
with p2p_lock:
assert_equal(sender.message_count.get("avaproofsreq", 0), 0)
self.log.info(
"Check the node requests all the proofs if it known none")
expect_indices(
list(shortid_map.values()),
[i for i in range(len(shortid_map))]
)
self.log.info(
"Check the node requests only the missing proofs")
known_proofids = []
for proof in proofs[:5]:
node.sendavalancheproof(proof.serialize().hex())
known_proofids.append(proof.proofid)
expected_indices = [i for i, proofid in enumerate(
shortid_map) if proofid not in known_proofids]
expect_indices(list(shortid_map.values()), expected_indices)
self.log.info(
"Check the node don't request prefilled proofs")
# Get the indices for a couple of proofs
indice_proof5 = list(shortid_map.keys()).index(proofids[5])
indice_proof6 = list(shortid_map.keys()).index(proofids[6])
prefilled_proofs = [
AvalanchePrefilledProof(indice_proof5, proofs[5]),
AvalanchePrefilledProof(indice_proof6, proofs[6]),
]
prefilled_proofs = sorted(
prefilled_proofs,
key=lambda prefilled_proof: prefilled_proof.index)
remaining_shortids = [shortid for proofid, shortid in shortid_map.items(
) if proofid not in proofids[5:7]]
known_proofids.extend(proofids[5:7])
expected_indices = [i for i, proofid in enumerate(
shortid_map) if proofid not in known_proofids]
expect_indices(
remaining_shortids,
expected_indices,
prefilled_proofs=prefilled_proofs)
self.log.info(
"Check the node requests no proof if it knows all of them")
for proof in proofs[5:]:
node.sendavalancheproof(proof.serialize().hex())
known_proofids.append(proof.proofid)
expect_indices(list(shortid_map.values()), [])
self.log.info("Check out of bounds index")
bad_peer = add_avalanche_p2p_outbound()
msg = build_msg_avaproofs([], prefilled_proofs=[
AvalanchePrefilledProof(
len(shortid_map) + 1,
gen_proof(node)[1])], key_pair=[key0, key1])
msg.shortids = list(shortid_map.values())
with node.assert_debug_log(["Misbehaving", "avaproofs-bad-indexes"]):
bad_peer.send_message(msg)
bad_peer.wait_for_disconnect()
self.log.info("An invalid prefilled proof will trigger a ban")
_, no_stake = gen_proof(node)
no_stake.stakes = []
bad_peer = add_avalanche_p2p_outbound()
msg = build_msg_avaproofs([], prefilled_proofs=[
AvalanchePrefilledProof(len(shortid_map), no_stake),
], key_pair=[key0, key1])
msg.shortids = list(shortid_map.values())
with node.assert_debug_log(["Misbehaving", "invalid-proof"]):
bad_peer.send_message(msg)
bad_peer.wait_for_disconnect()
def test_send_missing_proofs(self):
self.log.info("Check the node respond to missing proofs requests")
node = self.nodes[0]
self.restart_node(0)
numof_proof = 10
proofs = [gen_proof(node)[1] for _ in range(numof_proof)]
for proof in proofs:
node.sendavalancheproof(proof.serialize().hex())
proofids = get_proof_ids(node)
assert all(proof.proofid in proofids for proof in proofs)
self.log.info("Unsollicited requests are ignored")
peer = node.add_p2p_connection(ProofStoreP2PInterface())
peer.send_and_ping(msg_avaproofsreq())
assert_equal(len(peer.get_proofs()), 0)
def request_proofs(peer):
peer.send_message(msg_getavaproofs())
self.wait_until(lambda: self.received_avaproofs(peer))
avaproofs = self.received_avaproofs(peer)
assert_equal(len(avaproofs.shortids), numof_proof)
return avaproofs
_ = request_proofs(peer)
self.log.info("Sending an empty request has no effect")
peer.send_and_ping(msg_avaproofsreq())
assert_equal(len(peer.get_proofs()), 0)
self.log.info("Check the requested proofs are sent by the node")
def check_received_proofs(indices):
requester = node.add_p2p_connection(ProofStoreP2PInterface())
avaproofs = request_proofs(requester)
req = msg_avaproofsreq()
req.indices = indices
requester.send_message(req)
# Check we got the expected number of proofs
self.wait_until(
lambda: len(
requester.get_proofs()) == len(indices))
# Check we got the expected proofs
received_shortids = [
calculate_shortid(
avaproofs.key0,
avaproofs.key1,
proof.proofid) for proof in requester.get_proofs()]
assert_equal(set(received_shortids),
set([avaproofs.shortids[i] for i in indices]))
# Only the first proof
check_received_proofs([0])
# Only the last proof
check_received_proofs([numof_proof - 1])
# Half first
check_received_proofs(range(0, numof_proof // 2))
# Half last
check_received_proofs(range(numof_proof // 2, numof_proof))
# Even
check_received_proofs([i for i in range(numof_proof) if i % 2 == 0])
# Odds
check_received_proofs([i for i in range(numof_proof) if i % 2 == 1])
# All
check_received_proofs(range(numof_proof))
self.log.info(
"Check the node will not send the proofs if not requested before the timeout elapsed")
# Disconnect the peers
for peer in node.p2ps:
peer.peer_disconnect()
peer.wait_for_disconnect()
mocktime = int(time.time())
node.setmocktime(mocktime)
slow_peer = ProofStoreP2PInterface()
node.add_outbound_p2p_connection(
slow_peer,
p2p_idx=0,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
slow_peer.wait_until(
lambda: slow_peer.last_message.get("getavaproofs"))
slow_peer.nodeid = node.getpeerinfo()[-1]['id']
_ = request_proofs(slow_peer)
# Elapse the timeout
mocktime += AVALANCHE_AVAPROOFS_TIMEOUT + 1
node.setmocktime(mocktime)
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
# Periodic compact proofs requests are sent in the same loop as the
# cleanup, so when such a request is made we are sure the cleanup did
# happen.
slow_peer.wait_until(
lambda: slow_peer.message_count.get("getavaproofs") > 1)
req = msg_avaproofsreq()
req.indices = range(numof_proof)
slow_peer.send_and_ping(req)
# Check we get no proof
assert_equal(len(slow_peer.get_proofs()), 0)
def test_compact_proofs_download_on_connect(self):
self.log.info(
"Check the node get compact proofs upon avalanche outbound discovery")
requestee = self.nodes[0]
requester = self.nodes[1]
self.restart_node(0)
numof_proof = 10
proofs = [gen_proof(requestee)[1] for _ in range(numof_proof)]
for proof in proofs:
requestee.sendavalancheproof(proof.serialize().hex())
proofids = get_proof_ids(requestee)
assert all(proof.proofid in proofids for proof in proofs)
# Start the requester and check it gets all the proofs
self.start_node(1)
self.connect_nodes(0, 1)
self.wait_until(
lambda: all(
proof.proofid in proofids for proof in get_proof_ids(requester)))
def test_no_compactproofs_during_ibs(self):
self.log.info(
"Check the node don't request compact proofs during IBD")
node = self.nodes[0]
chainwork = int(node.getblockchaininfo()['chainwork'], 16)
self.restart_node(
0,
extra_args=self.extra_args[0] +
[f'-minimumchainwork={chainwork + 2:#x}'])
assert node.getblockchaininfo()['initialblockdownload']
peer = P2PInterface()
node.add_outbound_p2p_connection(
peer,
p2p_idx=0,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
# Force the node to process the sending loop
peer.sync_send_with_ping()
with p2p_lock:
assert_equal(peer.message_count.get("getavaproofs", 0), 0)
# Make sure there is no message sent as part of the periodic network
# messaging either
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
peer.sync_send_with_ping()
with p2p_lock:
assert_equal(peer.message_count.get("getavaproofs", 0), 0)
def test_send_inbound_getavaproofs_until_quorum_is_established(self):
self.log.info(
"Check we also request the inbounds until the quorum is established")
node = self.nodes[0]
self.restart_node(
0,
extra_args=self.extra_args[0] +
['-avaminquorumstake=1000000'])
- assert_equal(node.getavalancheinfo()['active'], False)
+ assert_equal(node.getavalancheinfo()['ready_to_poll'], False)
outbound = AvaP2PInterface()
node.add_outbound_p2p_connection(outbound, p2p_idx=0)
inbound = AvaP2PInterface()
node.add_p2p_connection(inbound)
inbound.nodeid = node.getpeerinfo()[-1]['id']
def count_getavaproofs(peers):
with p2p_lock:
return sum([peer.message_count.get("getavaproofs", 0)
for peer in peers])
# Upon connection only the outbound gets a compact proofs message
assert_equal(count_getavaproofs([inbound]), 0)
self.wait_until(lambda: count_getavaproofs([outbound]) == 1)
# Periodic send will include the inbound as well
current_total = count_getavaproofs([inbound, outbound])
while count_getavaproofs([inbound]) == 0:
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
self.wait_until(lambda: count_getavaproofs(
[inbound, outbound]) > current_total)
current_total = count_getavaproofs([inbound, outbound])
# Connect the minimum amount of stake and nodes
for _ in range(8):
node.add_p2p_connection(AvaP2PInterface(node))
- self.wait_until(lambda: node.getavalancheinfo()['active'] is True)
+ self.wait_until(lambda: node.getavalancheinfo()
+ ['ready_to_poll'] is True)
# From now on only the outbound is requested
count_inbound = count_getavaproofs([inbound])
for _ in range(20):
node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL)
self.wait_until(lambda: count_getavaproofs(
[inbound, outbound]) > current_total)
current_total = count_getavaproofs([inbound, outbound])
assert_equal(count_getavaproofs([inbound]), count_inbound)
def run_test(self):
# Most of the tests only need a single node; let the other ones start
# the node when required
self.stop_node(1)
self.test_send_outbound_getavaproofs()
self.test_send_manual_getavaproofs()
self.test_respond_getavaproofs()
self.test_request_missing_proofs()
self.test_send_missing_proofs()
self.test_compact_proofs_download_on_connect()
self.test_no_compactproofs_during_ibs()
self.test_send_inbound_getavaproofs_until_quorum_is_established()
if __name__ == '__main__':
CompactProofsTest().main()
diff --git a/test/functional/abc_p2p_getavaaddr.py b/test/functional/abc_p2p_getavaaddr.py
index 5091c0fc6..a76effab8 100755
--- a/test/functional/abc_p2p_getavaaddr.py
+++ b/test/functional/abc_p2p_getavaaddr.py
@@ -1,470 +1,472 @@
#!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test getavaaddr p2p message"""
import time
from decimal import Decimal
from test_framework.avatools import AvaP2PInterface, gen_proof
from test_framework.messages import (
NODE_AVALANCHE,
NODE_NETWORK,
AvalancheVote,
AvalancheVoteError,
msg_getavaaddr,
)
from test_framework.p2p import P2PInterface, p2p_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import MAX_NODES, assert_equal, p2p_port
# getavaaddr time interval in seconds, as defined in net_processing.cpp
# A node will ignore repeated getavaaddr during this interval
GETAVAADDR_INTERVAL = 2 * 60
# Addresses are sent every 30s on average, with a Poisson filter. Use a large
# enough delay so it's very unlikely we don't get the message within this time.
MAX_ADDR_SEND_DELAY = 5 * 60
# The interval between avalanche statistics computation
AVALANCHE_STATISTICS_INTERVAL = 10 * 60
# The getavaaddr messages are sent every 2 to 5 minutes
MAX_GETAVAADDR_DELAY = 5 * 60
class AddrReceiver(P2PInterface):
def __init__(self):
super().__init__()
self.received_addrs = None
def get_received_addrs(self):
with p2p_lock:
return self.received_addrs
def on_addr(self, message):
self.received_addrs = []
for addr in message.addrs:
self.received_addrs.append(f"{addr.ip}:{addr.port}")
def addr_received(self):
return self.received_addrs is not None
class MutedAvaP2PInterface(AvaP2PInterface):
def __init__(self, node=None):
super().__init__(node)
self.is_responding = False
self.privkey = None
self.addr = None
self.poll_received = 0
def set_addr(self, addr):
self.addr = addr
def on_avapoll(self, message):
self.poll_received += 1
class AllYesAvaP2PInterface(MutedAvaP2PInterface):
def __init__(self, node=None):
super().__init__(node)
self.is_responding = True
def on_avapoll(self, message):
self.send_avaresponse(
message.poll.round, [
AvalancheVote(
AvalancheVoteError.ACCEPTED, inv.hash) for inv in message.poll.invs],
self.master_privkey if self.delegation is None else self.delegated_privkey)
super().on_avapoll(message)
class AvaAddrTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-enableavalanche=1',
'-enableavalanchepeerdiscovery=1',
'-enableavalancheproofreplacement=1',
'-avaproofstakeutxoconfirmations=1',
'-avacooldown=0', '-whitelist=noban@127.0.0.1']]
def check_all_peers_received_getavaaddr_once(self, avapeers):
def received_all_getavaaddr(avapeers):
with p2p_lock:
return all([p.last_message.get("getavaaddr")
for p in avapeers])
self.wait_until(lambda: received_all_getavaaddr(avapeers))
with p2p_lock:
assert all([p.message_count.get(
"getavaaddr", 0) == 1 for p in avapeers])
def getavaaddr_interval_test(self):
node = self.nodes[0]
# Init mock time
mock_time = int(time.time())
node.setmocktime(mock_time)
# Add some avalanche peers to the node
for _ in range(10):
node.add_p2p_connection(AllYesAvaP2PInterface(node))
# Build some statistics to ensure some addresses will be returned
def all_peers_received_poll():
with p2p_lock:
return all([avanode.poll_received >
0 for avanode in node.p2ps])
self.wait_until(all_peers_received_poll)
node.mockscheduler(AVALANCHE_STATISTICS_INTERVAL)
requester = node.add_p2p_connection(AddrReceiver())
requester.send_message(msg_getavaaddr())
# Remember the time we sent the getavaaddr message
getavaddr_time = mock_time
# Spamming more getavaaddr has no effect
for _ in range(10):
with node.assert_debug_log(["Ignoring repeated getavaaddr from peer"]):
requester.send_message(msg_getavaaddr())
# Move the time so we get an addr response
mock_time += MAX_ADDR_SEND_DELAY
node.setmocktime(mock_time)
requester.wait_until(requester.addr_received)
# Elapse the getavaaddr interval and check our message is now accepted
# again
mock_time = getavaddr_time + GETAVAADDR_INTERVAL
node.setmocktime(mock_time)
requester.send_message(msg_getavaaddr())
# We can get an addr message again
mock_time += MAX_ADDR_SEND_DELAY
node.setmocktime(mock_time)
requester.wait_until(requester.addr_received)
def address_test(self, maxaddrtosend, num_proof, num_avanode):
self.restart_node(
0,
extra_args=self.extra_args[0] +
[f'-maxaddrtosend={maxaddrtosend}'])
node = self.nodes[0]
# Init mock time
mock_time = int(time.time())
node.setmocktime(mock_time)
# Create a bunch of proofs and associate a bunch of nodes with each.
avanodes = []
for _ in range(num_proof):
master_privkey, proof = gen_proof(node)
for n in range(num_avanode):
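# Alternate responding and muted peers so that only half of them build a
# positive availability score.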
avanode = AllYesAvaP2PInterface() if n % 2 else MutedAvaP2PInterface()
avanode.master_privkey = master_privkey
avanode.proof = proof
node.add_p2p_connection(avanode)
peerinfo = node.getpeerinfo()[-1]
avanode.set_addr(peerinfo["addr"])
avanodes.append(avanode)
responding_addresses = [
avanode.addr for avanode in avanodes if avanode.is_responding]
assert_equal(len(responding_addresses), num_proof * num_avanode // 2)
# Check we have what we expect
def all_nodes_connected():
avapeers = node.getavalanchepeerinfo()
if len(avapeers) != num_proof:
return False
for avapeer in avapeers:
if avapeer['nodecount'] != num_avanode:
return False
return True
self.wait_until(all_nodes_connected)
# Force the availability score to diverge between the responding and the
# muted nodes.
node.generate(1)
def poll_all_for_block():
with p2p_lock:
return all([avanode.poll_received > (
10 if avanode.is_responding else 0) for avanode in avanodes])
self.wait_until(poll_all_for_block)
# Move the scheduler time 10 minutes forward so that our peers
# get an availability score computed.
node.mockscheduler(AVALANCHE_STATISTICS_INTERVAL)
requester = node.add_p2p_connection(AddrReceiver())
requester.send_and_ping(msg_getavaaddr())
# Sanity check that the availability score is set up as expected
peerinfo = node.getpeerinfo()
muted_addresses = [
avanode.addr for avanode in avanodes if not avanode.is_responding]
assert all([p['availability_score'] <
0 for p in peerinfo if p["addr"] in muted_addresses])
assert all([p['availability_score'] >
0 for p in peerinfo if p["addr"] in responding_addresses])
# Requester has no availability_score because it's not an avalanche
# peer
assert 'availability_score' not in peerinfo[-1].keys()
mock_time += MAX_ADDR_SEND_DELAY
node.setmocktime(mock_time)
requester.wait_until(requester.addr_received)
addresses = requester.get_received_addrs()
assert_equal(len(addresses),
min(maxaddrtosend, len(responding_addresses)))
# Check all the addresses belong to responding peers
assert all([address in responding_addresses for address in addresses])
def getavaaddr_outbound_test(self):
self.log.info(
"Check we send a getavaaddr message to our avalanche outbound peers")
node = self.nodes[0]
# Get rid of previously connected nodes
node.disconnect_p2ps()
avapeers = []
for i in range(16):
avapeer = AvaP2PInterface()
node.add_outbound_p2p_connection(
avapeer,
p2p_idx=i,
)
avapeers.append(avapeer)
self.check_all_peers_received_getavaaddr_once(avapeers)
# Generate some block to poll for
node.generate(1)
# Because none of the avalanche peers is responding, our node should soon
# run out of options and send a getavaaddr message to its outbound
# avalanche peers.
node.mockscheduler(MAX_GETAVAADDR_DELAY)
def all_peers_received_getavaaddr():
with p2p_lock:
return all([p.message_count.get(
"getavaaddr", 0) > 1 for p in avapeers])
self.wait_until(all_peers_received_getavaaddr)
def getavaaddr_manual_test(self):
self.log.info(
"Check we send a getavaaddr message to our manually connected peers that support avalanche")
node = self.nodes[0]
# Get rid of previously connected nodes
node.disconnect_p2ps()
def added_node_connected(ip_port):
added_node_info = node.getaddednodeinfo(ip_port)
return len(
added_node_info) == 1 and added_node_info[0]['connected']
def connect_callback(address, port):
self.log.debug("Connecting to {}:{}".format(address, port))
p = AvaP2PInterface()
p2p_idx = 1
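# Let the test peer listen for the connection the node will initiate once
# it is added via addnode below.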
p.peer_accept_connection(
connect_cb=connect_callback,
connect_id=p2p_idx,
net=node.chain,
timeout_factor=node.timeout_factor,
)()
ip_port = f"127.0.01:{p2p_port(MAX_NODES - p2p_idx)}"
node.addnode(node=ip_port, command="add")
self.wait_until(lambda: added_node_connected(ip_port))
assert_equal(node.getpeerinfo()[-1]['addr'], ip_port)
assert_equal(node.getpeerinfo()[-1]['connection_type'], 'manual')
p.wait_until(lambda: p.last_message.get("getavaaddr"))
# Generate some block to poll for
node.generate(1)
# Because our avalanche peer is not responding, our node should soon run
# out of options and send another getavaaddr message.
node.mockscheduler(MAX_GETAVAADDR_DELAY)
p.wait_until(lambda: p.message_count.get("getavaaddr", 0) > 1)
def getavaaddr_noquorum(self):
self.log.info(
"Check we send a getavaaddr message while our quorum is not established")
node = self.nodes[0]
self.restart_node(0, extra_args=self.extra_args[0] + [
'-avaminquorumstake=500000000',
'-avaminquorumconnectedstakeratio=0.8',
])
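# The minimum stake requirement is higher than what the peers connected
# below provide, so the quorum is not established until more peers are
# added later in this test.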
avapeers = []
for i in range(16):
avapeer = AllYesAvaP2PInterface(node)
node.add_outbound_p2p_connection(
avapeer,
p2p_idx=i,
connection_type="avalanche",
services=NODE_NETWORK | NODE_AVALANCHE,
)
avapeers.append(avapeer)
peerinfo = node.getpeerinfo()[-1]
avapeer.set_addr(peerinfo["addr"])
self.check_all_peers_received_getavaaddr_once(avapeers)
def total_getavaaddr_msg():
with p2p_lock:
return sum([p.message_count.get("getavaaddr", 0)
for p in avapeers])
# Because we do not have enough stake to start polling, we keep requesting
# more addresses from all our peers
total_getavaaddr = total_getavaaddr_msg()
node.mockscheduler(MAX_GETAVAADDR_DELAY)
self.wait_until(lambda: total_getavaaddr_msg() ==
total_getavaaddr + len(avapeers))
# Move the scheduler time forward to make sure we get statistics
# computed. But since we did not start polling yet it should remain all
# zero.
node.mockscheduler(AVALANCHE_STATISTICS_INTERVAL)
def wait_for_availability_score():
peerinfo = node.getpeerinfo()
return all([p.get('availability_score', None) == Decimal(0)
for p in peerinfo])
self.wait_until(wait_for_availability_score)
requester = node.add_p2p_connection(AddrReceiver())
requester.send_and_ping(msg_getavaaddr())
node.setmocktime(int(time.time() + MAX_ADDR_SEND_DELAY))
# Check all the peers' addresses are returned.
requester.wait_until(requester.addr_received)
addresses = requester.get_received_addrs()
assert_equal(len(addresses), len(avapeers))
expected_addresses = [avapeer.addr for avapeer in avapeers]
assert all([address in expected_addresses for address in addresses])
# Add more nodes so we reach the minimum quorum stake amount.
for _ in range(4):
avapeer = AllYesAvaP2PInterface(node)
node.add_p2p_connection(avapeer)
- self.wait_until(lambda: node.getavalancheinfo()['active'] is True)
+ self.wait_until(lambda: node.getavalancheinfo()
+ ['ready_to_poll'] is True)
def is_vote_finalized(proof):
return node.getrawavalancheproof(
f"{proof.proofid:0{64}x}").get("finalized", False)
# Wait until all proofs are finalized
self.wait_until(lambda: all([is_vote_finalized(p.proof)
for p in node.p2ps if isinstance(p, AvaP2PInterface)]))
# Go through several rounds of getavaaddr requests. We don't know for
# sure how many will be sent as it depends on whether the peers
# responded fast enough during the polling phase, but at some point a
# single outbound peer will be requested and no more.
def sent_single_getavaaddr():
total_getavaaddr = total_getavaaddr_msg()
node.mockscheduler(MAX_GETAVAADDR_DELAY)
self.wait_until(lambda: total_getavaaddr_msg()
>= total_getavaaddr + 1)
for p in avapeers:
p.sync_send_with_ping()
return total_getavaaddr_msg() == total_getavaaddr + 1
self.wait_until(sent_single_getavaaddr)
def test_send_inbound_getavaaddr_until_quorum_is_established(self):
self.log.info(
"Check we also request the inbounds until the quorum is established")
node = self.nodes[0]
self.restart_node(
0,
extra_args=self.extra_args[0] +
['-avaminquorumstake=1000000'])
- assert_equal(node.getavalancheinfo()['active'], False)
+ assert_equal(node.getavalancheinfo()['ready_to_poll'], False)
outbound = MutedAvaP2PInterface()
node.add_outbound_p2p_connection(outbound, p2p_idx=0)
inbound = MutedAvaP2PInterface()
node.add_p2p_connection(inbound)
inbound.nodeid = node.getpeerinfo()[-1]['id']
def count_getavaaddr(peers):
with p2p_lock:
return sum([peer.message_count.get("getavaaddr", 0)
for peer in peers])
# Upon connection only the outbound gets a getavaaddr message
assert_equal(count_getavaaddr([inbound]), 0)
self.wait_until(lambda: count_getavaaddr([outbound]) == 1)
# Periodic send will include the inbound as well
current_total = count_getavaaddr([inbound, outbound])
while count_getavaaddr([inbound]) == 0:
node.mockscheduler(MAX_GETAVAADDR_DELAY)
self.wait_until(lambda: count_getavaaddr(
[inbound, outbound]) > current_total)
current_total = count_getavaaddr([inbound, outbound])
# Connect the minimum amount of stake and nodes
for _ in range(8):
node.add_p2p_connection(AvaP2PInterface(node))
- self.wait_until(lambda: node.getavalancheinfo()['active'] is True)
+ self.wait_until(lambda: node.getavalancheinfo()
+ ['ready_to_poll'] is True)
# From now on, only the outbound is requested
count_inbound = count_getavaaddr([inbound])
for _ in range(10):
# Trigger a poll
node.generate(1)
inbound.sync_send_with_ping()
node.mockscheduler(MAX_GETAVAADDR_DELAY)
self.wait_until(lambda: count_getavaaddr(
[inbound, outbound]) > current_total)
current_total = count_getavaaddr([inbound, outbound])
assert_equal(count_getavaaddr([inbound]), count_inbound)
def run_test(self):
self.getavaaddr_interval_test()
# Limited by maxaddrtosend
self.address_test(maxaddrtosend=3, num_proof=2, num_avanode=8)
# Limited by the number of good nodes
self.address_test(maxaddrtosend=100, num_proof=2, num_avanode=8)
self.getavaaddr_outbound_test()
self.getavaaddr_manual_test()
self.getavaaddr_noquorum()
self.test_send_inbound_getavaaddr_until_quorum_is_established()
if __name__ == '__main__':
AvaAddrTest().main()
diff --git a/test/functional/abc_rpc_getavalancheinfo.py b/test/functional/abc_rpc_getavalancheinfo.py
index 3718ac320..8bb4a832c 100755
--- a/test/functional/abc_rpc_getavalancheinfo.py
+++ b/test/functional/abc_rpc_getavalancheinfo.py
@@ -1,377 +1,378 @@
#!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getavalancheinfo RPC."""
import time
from decimal import Decimal
from test_framework.address import ADDRESS_ECREG_UNSPENDABLE
from test_framework.avatools import (
AvaP2PInterface,
avalanche_proof_from_hex,
create_coinbase_stakes,
gen_proof,
get_ava_p2p_interface,
wait_for_proof,
)
from test_framework.key import ECKey
from test_framework.messages import (
AvalancheProofVoteResponse,
AvalancheVote,
LegacyAvalancheProof,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, try_rpc
from test_framework.wallet_util import bytes_to_wif
class GetAvalancheInfoTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.conflicting_proof_cooldown = 100
self.extra_args = [[
'-enableavalanche=1',
'-enableavalancheproofreplacement=1',
f'-avalancheconflictingproofcooldown={self.conflicting_proof_cooldown}',
'-avaproofstakeutxoconfirmations=2',
'-avacooldown=0',
'-enableavalanchepeerdiscovery=1',
'-avaminquorumstake=250000000',
'-avaminquorumconnectedstakeratio=0.9',
]]
def run_test(self):
node = self.nodes[0]
privkey, proof = gen_proof(node)
is_legacy = isinstance(proof, LegacyAvalancheProof)
# Make the proof mature
node.generate(1)
def handle_legacy_format(expected):
# Add the payout address to the expected output if the legacy format
# is disabled
if not is_legacy and "local" in expected.keys():
expected["local"]["payout_address"] = ADDRESS_ECREG_UNSPENDABLE
return expected
def assert_avalancheinfo(expected):
assert_equal(
node.getavalancheinfo(),
handle_legacy_format(expected)
)
coinbase_amount = Decimal('25000000.00')
self.log.info("The test node has no proof")
assert_avalancheinfo({
- "active": False,
+ "ready_to_poll": False,
"network": {
"proof_count": 0,
"connected_proof_count": 0,
"dangling_proof_count": 0,
"finalized_proof_count": 0,
"conflicting_proof_count": 0,
- "orphan_proof_count": 0,
+ "immature_proof_count": 0,
"total_stake_amount": Decimal('0.00'),
"connected_stake_amount": Decimal('0.00'),
"dangling_stake_amount": Decimal('0.00'),
"node_count": 0,
"connected_node_count": 0,
"pending_node_count": 0,
}
})
self.log.info("The test node has a proof")
self.restart_node(0, self.extra_args[0] + [
'-enableavalanche=1',
'-avaproof={}'.format(proof.serialize().hex()),
'-avamasterkey={}'.format(bytes_to_wif(privkey.get_bytes()))
])
assert_avalancheinfo({
- "active": False,
+ "ready_to_poll": False,
"local": {
- "live": False,
+ "verified": False,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
"proof_count": 0,
"connected_proof_count": 0,
"dangling_proof_count": 0,
"finalized_proof_count": 0,
"conflicting_proof_count": 0,
- "orphan_proof_count": 0,
+ "immature_proof_count": 0,
"total_stake_amount": Decimal('0.00'),
"connected_stake_amount": Decimal('0.00'),
"dangling_stake_amount": Decimal('0.00'),
"node_count": 0,
"connected_node_count": 0,
"pending_node_count": 0,
}
})
# Mine a block to trigger proof validation
node.generate(1)
self.wait_until(
lambda: node.getavalancheinfo() == handle_legacy_format({
- "active": False,
+ "ready_to_poll": False,
"local": {
- "live": True,
+ "verified": True,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
- "proof_count": 0,
- "connected_proof_count": 0,
+ "proof_count": 1,
+ "connected_proof_count": 1,
"dangling_proof_count": 0,
"finalized_proof_count": 0,
"conflicting_proof_count": 0,
- "orphan_proof_count": 0,
- "total_stake_amount": Decimal('0.00'),
- "connected_stake_amount": Decimal('0.00'),
+ "immature_proof_count": 0,
+ "total_stake_amount": coinbase_amount,
+ "connected_stake_amount": coinbase_amount,
"dangling_stake_amount": Decimal('0.00'),
- "node_count": 0,
- "connected_node_count": 0,
+ "node_count": 1,
+ "connected_node_count": 1,
"pending_node_count": 0,
}
})
)
self.log.info("Connect a bunch of peers and nodes")
mock_time = int(time.time())
node.setmocktime(mock_time)
privkeys = []
proofs = []
conflicting_proofs = []
quorum = []
N = 13
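# Enough peers for the aggregated proof stake (N coinbase amounts) to
# exceed the -avaminquorumstake of 250000000 set in extra_args.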
for _ in range(N):
_privkey, _proof = gen_proof(node)
proofs.append(_proof)
privkeys.append(_privkey)
# For each proof, also make a conflicting one
stakes = create_coinbase_stakes(
node, [node.getbestblockhash()], node.get_deterministic_priv_key().key)
conflicting_proof_hex = node.buildavalancheproof(
10, 9999, bytes_to_wif(_privkey.get_bytes()), stakes)
conflicting_proof = avalanche_proof_from_hex(conflicting_proof_hex)
conflicting_proofs.append(conflicting_proof)
# Make the proof and its conflicting proof mature
node.generate(1)
n = AvaP2PInterface()
n.proof = _proof
n.master_privkey = _privkey
node.add_p2p_connection(n)
quorum.append(n)
n.send_avaproof(_proof)
wait_for_proof(node, f"{_proof.proofid:0{64}x}")
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
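# The conflicting proof cooldown has elapsed, so the node can now track
# this proof in its conflicting proof pool.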
n.send_avaproof(conflicting_proof)
# Generate an orphan (immature) proof
_, orphan_proof = gen_proof(node)
n.send_avaproof(orphan_proof)
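# Its stake does not have the 2 confirmations required by
# -avaproofstakeutxoconfirmations yet, so the node counts it as immature.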
self.wait_until(
lambda: node.getavalancheinfo() == handle_legacy_format({
- "active": True,
+ "ready_to_poll": True,
"local": {
- "live": True,
+ "verified": True,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
- "proof_count": N,
- "connected_proof_count": N,
+ "proof_count": N + 1,
+ "connected_proof_count": N + 1,
"dangling_proof_count": 0,
"finalized_proof_count": 0,
"conflicting_proof_count": N,
- "orphan_proof_count": 1,
- "total_stake_amount": coinbase_amount * N,
- "connected_stake_amount": coinbase_amount * N,
+ "immature_proof_count": 1,
+ "total_stake_amount": coinbase_amount * (N + 1),
+ "connected_stake_amount": coinbase_amount * (N + 1),
"dangling_stake_amount": Decimal('0.00'),
- "node_count": N,
- "connected_node_count": N,
+ "node_count": N + 1,
+ "connected_node_count": N + 1,
"pending_node_count": 0,
}
})
)
self.log.info("Disconnect some nodes")
D = 3
for _ in range(D):
n = node.p2ps.pop()
n.peer_disconnect()
n.wait_for_disconnect()
self.wait_until(
lambda: node.getavalancheinfo() == handle_legacy_format({
- "active": True,
+ "ready_to_poll": True,
"local": {
- "live": True,
+ "verified": True,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
- "proof_count": N,
- "connected_proof_count": N - D,
+ "proof_count": N + 1,
+ "connected_proof_count": N - D + 1,
"dangling_proof_count": D,
"finalized_proof_count": 0,
"conflicting_proof_count": N,
- "orphan_proof_count": 1,
- "total_stake_amount": coinbase_amount * N,
- "connected_stake_amount": coinbase_amount * (N - D),
+ "immature_proof_count": 1,
+ "total_stake_amount": coinbase_amount * (N + 1),
+ "connected_stake_amount": coinbase_amount * (N + 1 - D),
"dangling_stake_amount": coinbase_amount * D,
- "node_count": N - D,
- "connected_node_count": N - D,
+ "node_count": N + 1 - D,
+ "connected_node_count": N + 1 - D,
"pending_node_count": 0,
}
})
)
self.log.info("Add some pending nodes")
P = 3
for _ in range(P):
dg_priv = ECKey()
dg_priv.generate()
dg_pub = dg_priv.get_pubkey().get_bytes().hex()
_privkey, _proof = gen_proof(node)
# Make the proof mature
node.generate(1)
delegation = node.delegateavalancheproof(
f"{_proof.limited_proofid:0{64}x}",
bytes_to_wif(_privkey.get_bytes()),
dg_pub,
None
)
n = get_ava_p2p_interface(node)
n.send_avahello(delegation, dg_priv)
# Make sure at least one ProcessMessage pass has completed, or we might
# miss the last pending node for the following assert
n.sync_with_ping()
assert_avalancheinfo({
- "active": True,
+ "ready_to_poll": True,
"local": {
- "live": True,
+ "verified": True,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
# Orphan became mature
- "proof_count": N + 1,
- "connected_proof_count": N - D,
+ "proof_count": N + 2,
+ "connected_proof_count": N + 1 - D,
"dangling_proof_count": D + 1,
"finalized_proof_count": 0,
"conflicting_proof_count": N,
- "orphan_proof_count": 0,
- "total_stake_amount": coinbase_amount * (N + 1),
- "connected_stake_amount": coinbase_amount * (N - D),
+ "immature_proof_count": 0,
+ "total_stake_amount": coinbase_amount * (N + 2),
+ "connected_stake_amount": coinbase_amount * (N + 1 - D),
"dangling_stake_amount": coinbase_amount * (D + 1),
- "node_count": N - D + P,
- "connected_node_count": N - D,
+ "node_count": N + 1 - D + P,
+ "connected_node_count": N + 1 - D,
"pending_node_count": P,
}
})
self.log.info("Finalize the proofs for some peers")
def vote_for_all_proofs():
for i, n in enumerate(quorum):
if not n.is_connected:
continue
poll = n.get_avapoll_if_available()
# That node has not received a poll
if poll is None:
continue
# Respond yes to all polls except the conflicting proofs
votes = []
for inv in poll.invs:
response = AvalancheProofVoteResponse.ACTIVE
if inv.hash in [p.proofid for p in conflicting_proofs]:
response = AvalancheProofVoteResponse.REJECTED
votes.append(AvalancheVote(response, inv.hash))
n.send_avaresponse(poll.round, votes, privkeys[i])
# Check if all proofs are finalized or invalidated
return all(
[node.getrawavalancheproof(f"{p.proofid:0{64}x}").get("finalized", False) for p in proofs] +
[try_rpc(-8, "Proof not found", node.getrawavalancheproof,
f"{c.proofid:0{64}x}") for c in conflicting_proofs]
)
# Vote until proofs have finalized
expected_logs = []
for p in proofs:
expected_logs.append(
f"Avalanche finalized proof {p.proofid:0{64}x}")
with node.assert_debug_log(expected_logs):
self.wait_until(lambda: vote_for_all_proofs())
- self.log.info("Disconnect all the nodes")
+ self.log.info(
+ "Disconnect all the nodes, so we are the only node left on the network")
node.disconnect_p2ps()
assert_avalancheinfo({
- "active": False,
+ "ready_to_poll": False,
"local": {
- "live": True,
+ "verified": True,
"proofid": f"{proof.proofid:0{64}x}",
"limited_proofid": f"{proof.limited_proofid:0{64}x}",
"master": privkey.get_pubkey().get_bytes().hex(),
"stake_amount": coinbase_amount,
},
"network": {
- "proof_count": N + 1,
- "connected_proof_count": 0,
+ "proof_count": N + 2,
+ "connected_proof_count": 1,
"dangling_proof_count": N + 1,
"finalized_proof_count": N + 1,
"conflicting_proof_count": 0,
- "orphan_proof_count": 0,
- "total_stake_amount": coinbase_amount * (N + 1),
- "connected_stake_amount": 0,
+ "immature_proof_count": 0,
+ "total_stake_amount": coinbase_amount * (N + 2),
+ "connected_stake_amount": coinbase_amount,
"dangling_stake_amount": coinbase_amount * (N + 1),
- "node_count": 0,
- "connected_node_count": 0,
+ "node_count": 1,
+ "connected_node_count": 1,
"pending_node_count": 0,
}
})
if __name__ == '__main__':
GetAvalancheInfoTest().main()