diff --git a/src/base58.cpp b/src/base58.cpp index dfcff447b..4f05afe9e 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -1,189 +1,189 @@ // Copyright (c) 2014-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include /** All alphanumeric characters except for "0", "I", "O", and "l" */ static const char *pszBase58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"; static const int8_t mapBase58[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; bool DecodeBase58(const char *psz, std::vector &vch, int max_ret_len) { // Skip leading spaces. while (*psz && IsSpace(*psz)) { psz++; } // Skip and count leading '1's. int zeroes = 0; int length = 0; while (*psz == '1') { zeroes++; if (zeroes > max_ret_len) { return false; } psz++; } // Allocate enough space in big-endian base256 representation. // log(58) / log(256), rounded up. int size = strlen(psz) * 733 / 1000 + 1; std::vector b256(size); // Process the characters. // guarantee not out of range static_assert(sizeof(mapBase58) / sizeof(mapBase58[0]) == 256, "mapBase58.size() should be 256"); while (*psz && !IsSpace(*psz)) { // Decode base58 character int carry = mapBase58[(uint8_t)*psz]; // Invalid b58 character if (carry == -1) { return false; } int i = 0; for (std::vector::reverse_iterator it = b256.rbegin(); (carry != 0 || i < length) && (it != b256.rend()); ++it, ++i) { carry += 58 * (*it); *it = carry % 256; carry /= 256; } assert(carry == 0); length = i; if (length + zeroes > max_ret_len) { return false; } psz++; } // Skip trailing spaces. while (IsSpace(*psz)) { psz++; } if (*psz != 0) { return false; } // Skip leading zeroes in b256. std::vector::iterator it = b256.begin() + (size - length); // Copy result into output vector. vch.reserve(zeroes + (b256.end() - it)); vch.assign(zeroes, 0x00); while (it != b256.end()) { vch.push_back(*(it++)); } return true; } std::string EncodeBase58(const uint8_t *pbegin, const uint8_t *pend) { // Skip & count leading zeroes. int zeroes = 0; int length = 0; while (pbegin != pend && *pbegin == 0) { pbegin++; zeroes++; } // Allocate enough space in big-endian base58 representation. // log(256) / log(58), rounded up. int size = (pend - pbegin) * 138 / 100 + 1; std::vector b58(size); // Process the bytes. while (pbegin != pend) { int carry = *pbegin; int i = 0; // Apply "b58 = b58 * 256 + ch". 
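The scratch buffers above are sized with the integer ratios 733/1000 and 138/100, which over-approximate log(58)/log(256) ≈ 0.7322 and log(256)/log(58) ≈ 1.3658, so the base-256 (respectively base-58) result can never outgrow its buffer. A minimal sketch of those bounds, using hypothetical helper names that are not part of this diff:

    #include <cstddef>

    // Upper bound on the bytes a base58 string of n digits can decode to:
    // each digit carries log2(58) ~= 5.86 bits, a byte carries 8 bits, and
    // 733/1000 >= log(58)/log(256), so this never under-allocates.
    static size_t MaxDecodedSize(size_t base58_digits) {
        return base58_digits * 733 / 1000 + 1;
    }

    // Upper bound on the base58 digits needed for n payload bytes, since
    // 138/100 >= log(256)/log(58).
    static size_t MaxEncodedSize(size_t payload_bytes) {
        return payload_bytes * 138 / 100 + 1;
    }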
for (std::vector::reverse_iterator it = b58.rbegin(); (carry != 0 || i < length) && (it != b58.rend()); it++, i++) { carry += 256 * (*it); *it = carry % 58; carry /= 58; } assert(carry == 0); length = i; pbegin++; } // Skip leading zeroes in base58 result. std::vector::iterator it = b58.begin() + (size - length); while (it != b58.end() && *it == 0) { it++; } // Translate the result into a string. std::string str; str.reserve(zeroes + (b58.end() - it)); str.assign(zeroes, '1'); while (it != b58.end()) { str += pszBase58[*(it++)]; } return str; } std::string EncodeBase58(const std::vector &vch) { return EncodeBase58(vch.data(), vch.data() + vch.size()); } bool DecodeBase58(const std::string &str, std::vector &vchRet, int max_ret_len) { if (!ValidAsCString(str)) { return false; } return DecodeBase58(str.c_str(), vchRet, max_ret_len); } std::string EncodeBase58Check(const std::vector &vchIn) { // add 4-byte hash check to the end std::vector vch(vchIn); - uint256 hash = Hash(vch.begin(), vch.end()); + uint256 hash = Hash(vch); vch.insert(vch.end(), (uint8_t *)&hash, (uint8_t *)&hash + 4); return EncodeBase58(vch); } bool DecodeBase58Check(const char *psz, std::vector &vchRet, int max_ret_len) { if (!DecodeBase58(psz, vchRet, max_ret_len > std::numeric_limits::max() - 4 ? std::numeric_limits::max() : max_ret_len + 4) || (vchRet.size() < 4)) { vchRet.clear(); return false; } // re-calculate the checksum, ensure it matches the included 4-byte checksum - uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4); + uint256 hash = Hash(MakeSpan(vchRet).first(vchRet.size() - 4)); if (memcmp(&hash, &vchRet[vchRet.size() - 4], 4) != 0) { vchRet.clear(); return false; } vchRet.resize(vchRet.size() - 4); return true; } bool DecodeBase58Check(const std::string &str, std::vector &vchRet, int max_ret) { if (!ValidAsCString(str)) { return false; } return DecodeBase58Check(str.c_str(), vchRet, max_ret); } diff --git a/src/hash.h b/src/hash.h index d92b9c0be..d4acebaca 100644 --- a/src/hash.h +++ b/src/hash.h @@ -1,210 +1,187 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_HASH_H #define BITCOIN_HASH_H #include #include #include #include #include #include #include #include typedef uint256 ChainCode; /** A hasher class for Bitcoin's 256-bit hash (double SHA-256). */ class CHash256 { private: CSHA256 sha; public: static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE; void Finalize(Span output) { assert(output.size() == OUTPUT_SIZE); uint8_t buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } CHash256 &Write(Span input) { sha.Write(input.data(), input.size()); return *this; } CHash256 &Reset() { sha.Reset(); return *this; } }; /** A hasher class for Bitcoin's 160-bit hash (SHA-256 + RIPEMD-160). */ class CHash160 { private: CSHA256 sha; public: static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE; void Finalize(Span output) { assert(output.size() == OUTPUT_SIZE); uint8_t buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } CHash160 &Write(Span input) { sha.Write(input.data(), input.size()); return *this; } CHash160 &Reset() { sha.Reset(); return *this; } }; /** Compute the 256-bit hash of an object. 
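EncodeBase58Check() and DecodeBase58Check() above frame the payload as payload || first-four-bytes-of-SHA256d(payload), and this diff switches that checksum computation to the new single-argument Hash() overload. A minimal sketch of the framing, assuming the uint256, Hash() and MakeSpan() declarations used elsewhere in this diff (helper names here are illustrative):

    #include <cstdint>
    #include <cstring>
    #include <vector>
    // Assumes uint256.h, hash.h and span.h from this tree.

    // Append the 4-byte checksum; the result is what gets base58-encoded.
    std::vector<uint8_t> WithChecksum(std::vector<uint8_t> payload) {
        const uint256 hash = Hash(payload); // double SHA-256 of the payload
        payload.insert(payload.end(), hash.begin(), hash.begin() + 4);
        return payload;
    }

    // Verify the trailing checksum after base58-decoding.
    bool ChecksumOk(const std::vector<uint8_t> &framed) {
        if (framed.size() < 4) {
            return false;
        }
        const uint256 hash = Hash(MakeSpan(framed).first(framed.size() - 4));
        return memcmp(hash.begin(), framed.data() + framed.size() - 4, 4) == 0;
    }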
*/ -template inline uint256 Hash(const T1 pbegin, const T1 pend) { - static const uint8_t pblank[1] = {}; +template inline uint256 Hash(const T &in1) { uint256 result; - CHash256() - .Write({pbegin == pend ? pblank : (const uint8_t *)&pbegin[0], - (pend - pbegin) * sizeof(pbegin[0])}) - .Finalize(result); + CHash256().Write(MakeUCharSpan(in1)).Finalize(result); return result; } /** Compute the 256-bit hash of the concatenation of two objects. */ template -inline uint256 Hash(const T1 p1begin, const T1 p1end, const T2 p2begin, - const T2 p2end) { - static const uint8_t pblank[1] = {}; +inline uint256 Hash(const T1 &in1, const T2 &in2) { uint256 result; CHash256() - .Write({p1begin == p1end ? pblank : (const uint8_t *)&p1begin[0], - (p1end - p1begin) * sizeof(p1begin[0])}) - .Write({p2begin == p2end ? pblank : (const uint8_t *)&p2begin[0], - (p2end - p2begin) * sizeof(p2begin[0])}) + .Write(MakeUCharSpan(in1)) + .Write(MakeUCharSpan(in2)) .Finalize(result); return result; } /** Compute the 160-bit hash an object. */ -template inline uint160 Hash160(const T1 pbegin, const T1 pend) { - static uint8_t pblank[1] = {}; +template inline uint160 Hash160(const T1 &in1) { uint160 result; - CHash160() - .Write({pbegin == pend ? pblank : (const uint8_t *)&pbegin[0], - (pend - pbegin) * sizeof(pbegin[0])}) - .Finalize(result); + CHash160().Write(MakeUCharSpan(in1)).Finalize(result); return result; } -/** Compute the 160-bit hash of a vector. */ -inline uint160 Hash160(const std::vector &vch) { - return Hash160(vch.begin(), vch.end()); -} - -/** Compute the 160-bit hash of a vector. */ -template -inline uint160 Hash160(const prevector &vch) { - return Hash160(vch.begin(), vch.end()); -} - /** A writer stream (for serialization) that computes a 256-bit hash. */ class CHashWriter { private: CHash256 ctx; const int nType; const int nVersion; public: CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {} int GetType() const { return nType; } int GetVersion() const { return nVersion; } void write(const char *pch, size_t size) { ctx.Write({(const uint8_t *)pch, size}); } // invalidates the object uint256 GetHash() { uint256 result; ctx.Finalize(result); return result; } /** * Returns the first 64 bits from the resulting hash. */ inline uint64_t GetCheapHash() { uint8_t result[CHash256::OUTPUT_SIZE]; ctx.Finalize(result); return ReadLE64(result); } template CHashWriter &operator<<(const T &obj) { // Serialize to this stream ::Serialize(*this, obj); return (*this); } }; /** * Reads data from an underlying stream, while hashing the read data. */ template class CHashVerifier : public CHashWriter { private: Source *source; public: explicit CHashVerifier(Source *source_) : CHashWriter(source_->GetType(), source_->GetVersion()), source(source_) {} void read(char *pch, size_t nSize) { source->read(pch, nSize); this->write(pch, nSize); } void ignore(size_t nSize) { char data[1024]; while (nSize > 0) { size_t now = std::min(nSize, 1024); read(data, now); nSize -= now; } } template CHashVerifier &operator>>(T &&obj) { // Unserialize from this stream ::Unserialize(*this, obj); return (*this); } }; /** Compute the 256-bit hash of an object's serialization. 
*/ template uint256 SerializeHash(const T &obj, int nType = SER_GETHASH, int nVersion = PROTOCOL_VERSION) { CHashWriter ss(nType, nVersion); ss << obj; return ss.GetHash(); } uint32_t MurmurHash3(uint32_t nHashSeed, Span vDataToHash); void BIP32Hash(const ChainCode &chainCode, uint32_t nChild, uint8_t header, const uint8_t data[32], uint8_t output[64]); #endif // BITCOIN_HASH_H diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 5ea38e742..ff47ce38b 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -1,215 +1,215 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include CMerkleBlock::CMerkleBlock(const CBlock &block, CBloomFilter *filter, const std::set *txids) { header = block.GetBlockHeader(); std::vector vMatch; std::vector vHashes; vMatch.reserve(block.vtx.size()); vHashes.reserve(block.vtx.size()); if (filter) { for (const auto &tx : block.vtx) { vMatch.push_back(filter->MatchAndInsertOutputs(*tx)); } } for (size_t i = 0; i < block.vtx.size(); i++) { const CTransaction *tx = block.vtx[i].get(); const TxId &txid = tx->GetId(); if (filter) { if (!vMatch[i]) { vMatch[i] = filter->MatchInputs(*tx); } if (vMatch[i]) { vMatchedTxn.push_back(std::make_pair(i, txid)); } } else { vMatch.push_back(txids && txids->count(txid)); } vHashes.push_back(txid); } txn = CPartialMerkleTree(vHashes, vMatch); } uint256 CPartialMerkleTree::CalcHash(int height, size_t pos, const std::vector &vTxid) { // we can never have zero txs in a merkle block, we always need the // coinbase tx if we do not have this assert, we can hit a memory // access violation when indexing into vTxid assert(vTxid.size() != 0); if (height == 0) { // hash at height 0 is the txids themself. return vTxid[pos]; } // Calculate left hash. uint256 left = CalcHash(height - 1, pos * 2, vTxid), right; // Calculate right hash if not beyond the end of the array - copy left hash // otherwise. if (pos * 2 + 1 < CalcTreeWidth(height - 1)) { right = CalcHash(height - 1, pos * 2 + 1, vTxid); } else { right = left; } // Combine subhashes. - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } void CPartialMerkleTree::TraverseAndBuild(int height, size_t pos, const std::vector &vTxid, const std::vector &vMatch) { // Determine whether this node is the parent of at least one matched txid. bool fParentOfMatch = false; for (size_t p = pos << height; p < (pos + 1) << height && p < nTransactions; p++) { fParentOfMatch |= vMatch[p]; } // Store as flag bit. vBits.push_back(fParentOfMatch); if (height == 0 || !fParentOfMatch) { // If at height 0, or nothing interesting below, store hash and stop. vHash.push_back(CalcHash(height, pos, vTxid)); } else { // Otherwise, don't store any hash, but descend into the subtrees. 
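The hash.h hunk above replaces the iterator-pair overloads with Span-based ones: Hash(), Hash160() and CHash256/CHash160::Write() now take whole objects and convert them to byte spans via MakeUCharSpan(). A minimal before/after sketch of the call-site migration (function and variable names are illustrative, not taken from this diff):

    #include <vector>
    // Assumes hash.h, uint160/uint256 and span.h from this tree.

    void HashCallSites(const std::vector<uint8_t> &payload,
                       const uint256 &left, const uint256 &right) {
        // Old interface (removed by this diff): iterator pairs.
        //   uint256 h  = Hash(payload.begin(), payload.end());
        //   uint160 id = Hash160(payload.begin(), payload.end());

        // New interface: any Span-convertible object is passed directly.
        uint256 h = Hash(payload);          // double SHA-256 of the bytes
        uint160 id = Hash160(payload);      // SHA-256 followed by RIPEMD-160
        uint256 parent = Hash(left, right); // hash of the concatenation

        // SerializeHash() is unchanged: it hashes an object's serialization
        // through CHashWriter rather than its raw in-memory bytes.
        (void)h; (void)id; (void)parent;
    }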
TraverseAndBuild(height - 1, pos * 2, vTxid, vMatch); if (pos * 2 + 1 < CalcTreeWidth(height - 1)) { TraverseAndBuild(height - 1, pos * 2 + 1, vTxid, vMatch); } } } uint256 CPartialMerkleTree::TraverseAndExtract(int height, size_t pos, size_t &nBitsUsed, size_t &nHashUsed, std::vector &vMatch, std::vector &vnIndex) { if (nBitsUsed >= vBits.size()) { // Overflowed the bits array - failure fBad = true; return uint256(); } bool fParentOfMatch = vBits[nBitsUsed++]; if (height == 0 || !fParentOfMatch) { // If at height 0, or nothing interesting below, use stored hash and do // not descend. if (nHashUsed >= vHash.size()) { // Overflowed the hash array - failure fBad = true; return uint256(); } const uint256 &hash = vHash[nHashUsed++]; // In case of height 0, we have a matched txid. if (height == 0 && fParentOfMatch) { vMatch.push_back(hash); vnIndex.push_back(pos); } return hash; } // Otherwise, descend into the subtrees to extract matched txids and hashes. uint256 left = TraverseAndExtract(height - 1, pos * 2, nBitsUsed, nHashUsed, vMatch, vnIndex), right; if (pos * 2 + 1 < CalcTreeWidth(height - 1)) { right = TraverseAndExtract(height - 1, pos * 2 + 1, nBitsUsed, nHashUsed, vMatch, vnIndex); if (right == left) { // The left and right branches should never be identical, as the // transaction hashes covered by them must each be unique. fBad = true; } } else { right = left; } // and combine them before returning. - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } CPartialMerkleTree::CPartialMerkleTree(const std::vector &vTxid, const std::vector &vMatch) : nTransactions(vTxid.size()), fBad(false) { // reset state vBits.clear(); vHash.clear(); // calculate height of tree int nHeight = 0; while (CalcTreeWidth(nHeight) > 1) { nHeight++; } // traverse the partial tree TraverseAndBuild(nHeight, 0, vTxid, vMatch); } CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {} uint256 CPartialMerkleTree::ExtractMatches(std::vector &vMatch, std::vector &vnIndex) { vMatch.clear(); // An empty set will not work if (nTransactions == 0) { return uint256(); } // Check for excessively high numbers of transactions. // FIXME: Track the maximum block size we've seen and use it here. // There can never be more hashes provided than one for every txid. if (vHash.size() > nTransactions) { return uint256(); } // There must be at least one bit per node in the partial tree, and at least // one node per hash. if (vBits.size() < vHash.size()) { return uint256(); } // calculate height of tree. int nHeight = 0; while (CalcTreeWidth(nHeight) > 1) { nHeight++; } // traverse the partial tree. size_t nBitsUsed = 0, nHashUsed = 0; uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch, vnIndex); // verify that no problems occurred during the tree traversal. if (fBad) { return uint256(); } // verify that all bits were consumed (except for the padding caused by // serializing it as a byte sequence) if ((nBitsUsed + 7) / 8 != (vBits.size() + 7) / 8) { return uint256(); } // verify that all hashes were consumed. 
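CalcHash() and TraverseAndExtract() above both derive a parent node as SHA256d(left || right), copying the left child into the right slot when the right child would fall past the end of the row, and the diff now spells that as the two-argument Hash() overload. A minimal sketch of the parent-node rule (hypothetical helper, not part of the diff):

    #include <cstddef>
    #include <vector>
    // Assumes uint256 and the two-argument Hash() overload from hash.h above.

    // Parent of node `pos` in one merkle row: the right sibling is duplicated
    // from the left one when the row has an odd number of entries.
    uint256 ParentHash(const std::vector<uint256> &row, size_t pos) {
        const uint256 &left = row[2 * pos];
        const uint256 &right =
            (2 * pos + 1 < row.size()) ? row[2 * pos + 1] : left;
        return Hash(left, right);
    }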
if (nHashUsed != vHash.size()) { return uint256(); } return hashMerkleRoot; } diff --git a/src/net.cpp b/src/net.cpp index b42b98dfe..38ac92761 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1,3186 +1,3186 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 #include #else #include #endif #ifdef USE_POLL #include #endif #ifdef USE_UPNP #include #include #include // The minimum supported miniUPnPc API version is set to 10. This keeps // compatibility with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages. static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed"); #endif #include #include // How often to dump addresses to peers.dat static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15}; /** * Number of DNS seeds to query when the number of connections is low. */ static constexpr int DNSSEEDS_TO_QUERY_AT_ONCE = 3; /** * How long to delay before querying DNS seeds * * If we have more than THRESHOLD entries in addrman, then it's likely * that we got those addresses from having previously connected to the P2P * network, and that we'll be able to successfully reconnect to the P2P * network via contacting one of them. So if that's the case, spend a * little longer trying to connect to known peers before querying the * DNS seeds. */ static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11}; static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5}; // "many" vs "few" peers static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000; // We add a random period time (0 to 1 seconds) to feeler connections to prevent // synchronization. 
#define FEELER_SLEEP_WINDOW 1 // MSG_NOSIGNAL is not available on some platforms, if it doesn't exist define // it as 0 #if !defined(MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif // MSG_DONTWAIT is not available on some platforms, if it doesn't exist define // it as 0 #if !defined(MSG_DONTWAIT) #define MSG_DONTWAIT 0 #endif /** Used to pass flags to the Bind() function */ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), }; // The set of sockets cannot be modified while waiting // The sleep time needs to be small to avoid new sockets stalling static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50; const std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("localhostnonce")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[8:16] static const uint64_t RANDOMIZER_ID_EXTRAENTROPY = 0x94b05d41679a4ff7ULL; // // Global state variables // bool fDiscover = true; bool fListen = true; bool g_relay_txes = !DEFAULT_BLOCKSONLY; RecursiveMutex cs_mapLocalHost; std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; void CConnman::AddAddrFetch(const std::string &strDest) { LOCK(m_addr_fetches_mutex); m_addr_fetches.push_back(strDest); } unsigned short GetListenPort() { return (unsigned short)(gArgs.GetArg("-port", Params().GetDefaultPort())); } // find 'best' local address for a particular peer bool GetLocal(CService &addr, const CNetAddr *paddrPeer) { if (!fListen) { return false; } int nBestScore = -1; int nBestReachability = -1; { LOCK(cs_mapLocalHost); for (const auto &entry : mapLocalHost) { int nScore = entry.second.nScore; int nReachability = entry.first.GetReachabilityFrom(paddrPeer); if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { addr = CService(entry.first, entry.second.nPort); nBestReachability = nReachability; nBestScore = nScore; } } } return nBestScore >= 0; } //! Convert the pnSeed6 array into usable address objects. static std::vector convertSeed6(const std::vector &vSeedsIn) { // It'll only connect to one or two seed nodes because once it connects, // it'll get a pile of addresses with newer timestamps. Seed nodes are given // a random 'last seen time' of between one and two weeks ago. const int64_t nOneWeek = 7 * 24 * 60 * 60; std::vector vSeedsOut; vSeedsOut.reserve(vSeedsIn.size()); FastRandomContext rng; for (const auto &seed_in : vSeedsIn) { struct in6_addr ip; memcpy(&ip, seed_in.addr, sizeof(ip)); CAddress addr(CService(ip, seed_in.port), GetDesirableServiceFlags(NODE_NONE)); addr.nTime = GetTime() - rng.randrange(nOneWeek) - nOneWeek; vSeedsOut.push_back(addr); } return vSeedsOut; } // Get best local address for a particular peer as a CAddress. Otherwise, return // the unroutable 0.0.0.0 but filled in with the normal parameters, since the IP // may be changed to a useful one by discovery. 
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices) { CAddress ret(CService(CNetAddr(), GetListenPort()), nLocalServices); CService addr; if (GetLocal(addr, paddrPeer)) { ret = CAddress(addr, nLocalServices); } ret.nTime = GetAdjustedTime(); return ret; } static int GetnScore(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return 0; } return mapLocalHost[addr].nScore; } // Is our peer's addrLocal potentially useful as an external IP source? bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && IsReachable(addrLocal.GetNetwork()); } // Pushes our own address to a peer. void AdvertiseLocal(CNode *pnode) { if (fListen && pnode->fSuccessfullyConnected) { CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); if (gArgs.GetBoolArg("-addrmantest", false)) { // use IPv4 loopback during addrmantest addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); } // If discovery is enabled, sometimes give our peer the address it // tells us that it sees us as in case it has a better idea of our // address than we do. FastRandomContext rng; if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) { addrLocal.SetIP(pnode->GetAddrLocal()); } if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) { LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); pnode->PushAddress(addrLocal, rng); } } } // Learn a new local address. bool AddLocal(const CService &addr, int nScore) { if (!addr.IsRoutable()) { return false; } if (!fDiscover && nScore < LOCAL_MANUAL) { return false; } if (!IsReachable(addr)) { return false; } LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore); { LOCK(cs_mapLocalHost); bool fAlready = mapLocalHost.count(addr) > 0; LocalServiceInfo &info = mapLocalHost[addr]; if (!fAlready || nScore >= info.nScore) { info.nScore = nScore + (fAlready ? 
1 : 0); info.nPort = addr.GetPort(); } } return true; } bool AddLocal(const CNetAddr &addr, int nScore) { return AddLocal(CService(addr, GetListenPort()), nScore); } void RemoveLocal(const CService &addr) { LOCK(cs_mapLocalHost); LogPrintf("RemoveLocal(%s)\n", addr.ToString()); mapLocalHost.erase(addr); } void SetReachable(enum Network net, bool reachable) { if (net == NET_UNROUTABLE || net == NET_INTERNAL) { return; } LOCK(cs_mapLocalHost); vfLimited[net] = !reachable; } bool IsReachable(enum Network net) { LOCK(cs_mapLocalHost); return !vfLimited[net]; } bool IsReachable(const CNetAddr &addr) { return IsReachable(addr.GetNetwork()); } /** vote for a local address */ bool SeenLocal(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return false; } mapLocalHost[addr].nScore++; return true; } /** check whether a given address is potentially local */ bool IsLocal(const CService &addr) { LOCK(cs_mapLocalHost); return mapLocalHost.count(addr) > 0; } CNode *CConnman::FindNode(const CNetAddr &ip) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (static_cast(pnode->addr) == ip) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CSubNet &subNet) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match(static_cast(pnode->addr))) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const std::string &addrName) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetAddrName() == addrName) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CService &addr) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (static_cast(pnode->addr) == addr) { return pnode; } } return nullptr; } bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce) { return false; } } return true; } /** Get the bind address for a socket as CAddress */ static CAddress GetBindAddress(SOCKET sock) { CAddress addr_bind; struct sockaddr_storage sockaddr_bind; socklen_t sockaddr_bind_len = sizeof(sockaddr_bind); if (sock != INVALID_SOCKET) { if (!getsockname(sock, (struct sockaddr *)&sockaddr_bind, &sockaddr_bind_len)) { addr_bind.SetSockAddr((const struct sockaddr *)&sockaddr_bind); } else { LogPrint(BCLog::NET, "Warning: getsockname failed\n"); } } return addr_bind; } CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) { assert(conn_type != ConnectionType::INBOUND); if (pszDest == nullptr) { if (IsLocal(addrConnect)) { return nullptr; } // Look for an existing connection CNode *pnode = FindNode(static_cast(addrConnect)); if (pnode) { LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } /// debug print LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n", pszDest ? pszDest : addrConnect.ToString(), pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0); // Resolve const int default_port = Params().GetDefaultPort(); if (pszDest) { std::vector resolved; if (Lookup(pszDest, resolved, default_port, fNameLookup && !HaveNameProxy(), 256) && !resolved.empty()) { addrConnect = CAddress(resolved[GetRand(resolved.size())], NODE_NONE); if (!addrConnect.IsValid()) { LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToString(), pszDest); return nullptr; } // It is possible that we already have a connection to the IP/port // pszDest resolved to. 
In that case, drop the connection that was // just created, and return the existing CNode instead. Also store // the name we used to connect in that CNode, so that future // FindNode() calls to that name catch this early. LOCK(cs_vNodes); CNode *pnode = FindNode(static_cast(addrConnect)); if (pnode) { pnode->MaybeSetAddrName(std::string(pszDest)); LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } } // Connect bool connected = false; SOCKET hSocket = INVALID_SOCKET; proxyType proxy; if (addrConnect.IsValid()) { bool proxyConnectionFailed = false; if (GetProxy(addrConnect.GetNetwork(), proxy)) { hSocket = CreateSocket(proxy.proxy); if (hSocket == INVALID_SOCKET) { return nullptr; } connected = ConnectThroughProxy( proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, proxyConnectionFailed); } else { // no proxy needed (none set for target network) hSocket = CreateSocket(addrConnect); if (hSocket == INVALID_SOCKET) { return nullptr; } connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) // is not caused by a problem connecting to the proxy, mark this as // an attempt. addrman.Attempt(addrConnect, fCountFailure); } } else if (pszDest && GetNameProxy(proxy)) { hSocket = CreateSocket(proxy.proxy); if (hSocket == INVALID_SOCKET) { return nullptr; } std::string host; int port = default_port; SplitHostPort(std::string(pszDest), port, host); bool proxyConnectionFailed; connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, proxyConnectionFailed); } if (!connected) { CloseSocket(hSocket); return nullptr; } // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); uint64_t extra_entropy = GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY) .Write(id) .Finalize(); CAddress addr_bind = GetBindAddress(hSocket); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, extra_entropy, addr_bind, pszDest ? pszDest : "", conn_type); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our // peer count) RandAddEvent(uint32_t(id)); return pnode; } void CNode::CloseSocketDisconnect() { fDisconnect = true; LOCK(cs_hSocket); if (hSocket != INVALID_SOCKET) { LogPrint(BCLog::NET, "disconnecting peer=%d\n", id); CloseSocket(hSocket); } } void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const { for (const auto &subnet : vWhitelistedRange) { if (subnet.m_subnet.Match(addr)) { NetPermissions::AddFlag(flags, subnet.m_flags); } } } std::string CNode::GetAddrName() const { LOCK(cs_addrName); return addrName; } void CNode::MaybeSetAddrName(const std::string &addrNameIn) { LOCK(cs_addrName); if (addrName.empty()) { addrName = addrNameIn; } } CService CNode::GetAddrLocal() const { LOCK(cs_addrLocal); return addrLocal; } void CNode::SetAddrLocal(const CService &addrLocalIn) { LOCK(cs_addrLocal); if (addrLocal.IsValid()) { error("Addr local already set for node: %i. 
Refusing to change from %s " "to %s", id, addrLocal.ToString(), addrLocalIn.ToString()); } else { addrLocal = addrLocalIn; } } void CNode::copyStats(CNodeStats &stats, const std::vector &m_asmap) { stats.nodeid = this->GetId(); stats.nServices = nServices; stats.addr = addr; stats.addrBind = addrBind; stats.m_mapped_as = addr.GetMappedAS(m_asmap); if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_filter); stats.fRelayTxes = m_tx_relay->fRelayTxes; } else { stats.fRelayTxes = false; } stats.nLastSend = nLastSend; stats.nLastRecv = nLastRecv; stats.nTimeConnected = nTimeConnected; stats.nTimeOffset = nTimeOffset; stats.addrName = GetAddrName(); stats.nVersion = nVersion; { LOCK(cs_SubVer); stats.cleanSubVer = cleanSubVer; } stats.fInbound = IsInboundConn(); stats.m_manual_connection = IsManualConn(); stats.nStartingHeight = nStartingHeight; { LOCK(cs_vSend); stats.mapSendBytesPerMsgCmd = mapSendBytesPerMsgCmd; stats.nSendBytes = nSendBytes; } { LOCK(cs_vRecv); stats.mapRecvBytesPerMsgCmd = mapRecvBytesPerMsgCmd; stats.nRecvBytes = nRecvBytes; } stats.m_legacyWhitelisted = m_legacyWhitelisted; stats.m_permissionFlags = m_permissionFlags; if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_feeFilter); stats.minFeeFilter = m_tx_relay->minFeeFilter; } else { stats.minFeeFilter = Amount::zero(); } // It is common for nodes with good ping times to suddenly become lagged, // due to a new block arriving or other large transfer. Merely reporting // pingtime might fool the caller into thinking the node was still // responsive, since pingtime does not update until the ping is complete, // which might take a while. So, if a ping is taking an unusually long time // in flight, the caller can immediately detect that this is happening. std::chrono::microseconds ping_wait{0}; if ((0 != nPingNonceSent) && (0 != m_ping_start.load().count())) { ping_wait = GetTime() - m_ping_start.load(); } // Raw ping time is in microseconds, but show it to user as whole seconds // (Bitcoin users should be well used to small numbers with many decimal // places by now :) stats.m_ping_usec = nPingUsecTime; stats.m_min_ping_usec = nMinPingUsecTime; stats.m_ping_wait_usec = count_microseconds(ping_wait); // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; } bool CNode::ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete) { complete = false; const auto time = GetTime(); LOCK(cs_vRecv); nLastRecv = std::chrono::duration_cast(time).count(); nRecvBytes += nBytes; while (nBytes > 0) { // Absorb network data. int handled = m_deserializer->Read(config, pch, nBytes); if (handled < 0) { return false; } pch += handled; nBytes -= handled; if (m_deserializer->Complete()) { // decompose a transport agnostic CNetMessage from the deserializer CNetMessage msg = m_deserializer->GetMessage(config, time); // Store received bytes per message command to prevent a memory DOS, // only allow valid commands. 
mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.m_command); if (i == mapRecvBytesPerMsgCmd.end()) { i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); } assert(i != mapRecvBytesPerMsgCmd.end()); i->second += msg.m_raw_message_size; // push the message to the process queue, vRecvMsg.push_back(std::move(msg)); complete = true; } } return true; } void CNode::SetSendVersion(int nVersionIn) { // Send version may only be changed in the version message, and only one // version message is allowed per session. We can therefore treat this value // as const and even atomic as long as it's only used once a version message // has been successfully processed. Any attempt to set this twice is an // error. if (nSendVersion != 0) { error("Send version already set for node: %i. Refusing to change from " "%i to %i", id, nSendVersion, nVersionIn); } else { nSendVersion = nVersionIn; } } int CNode::GetSendVersion() const { // The send version should always be explicitly set to INIT_PROTO_VERSION // rather than using this value until SetSendVersion has been called. if (nSendVersion == 0) { error("Requesting unset send version for node: %i. Using %i", id, INIT_PROTO_VERSION); return INIT_PROTO_VERSION; } return nSendVersion; } int V1TransportDeserializer::readHeader(const Config &config, const char *pch, uint32_t nBytes) { // copy data to temporary parsing buffer uint32_t nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos; uint32_t nCopy = std::min(nRemaining, nBytes); memcpy(&hdrbuf[nHdrPos], pch, nCopy); nHdrPos += nCopy; // if header incomplete, exit if (nHdrPos < CMessageHeader::HEADER_SIZE) { return nCopy; } // deserialize to CMessageHeader try { hdrbuf >> hdr; } catch (const std::exception &) { return -1; } // Reject oversized messages if (hdr.IsOversized(config)) { LogPrint(BCLog::NET, "Oversized header detected\n"); return -1; } // switch state to reading message data in_data = true; return nCopy; } int V1TransportDeserializer::readData(const char *pch, uint32_t nBytes) { unsigned int nRemaining = hdr.nMessageSize - nDataPos; unsigned int nCopy = std::min(nRemaining, nBytes); if (vRecv.size() < nDataPos + nCopy) { // Allocate up to 256 KiB ahead, but never more than the total message // size. vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } hasher.Write({(const uint8_t *)pch, nCopy}); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } const uint256 &V1TransportDeserializer::GetMessageHash() const { assert(Complete()); if (data_hash.IsNull()) { hasher.Finalize(data_hash); } return data_hash; } CNetMessage V1TransportDeserializer::GetMessage(const Config &config, const std::chrono::microseconds time) { // decompose a single CNetMessage from the TransportDeserializer CNetMessage msg(std::move(vRecv)); // store state about valid header, netmagic and checksum msg.m_valid_header = hdr.IsValid(config); // FIXME Split CheckHeaderMagicAndCommand() into CheckHeaderMagic() and // CheckCommand() to prevent the net magic check code duplication. 
msg.m_valid_netmagic = (memcmp(std::begin(hdr.pchMessageStart), std::begin(config.GetChainParams().NetMagic()), CMessageHeader::MESSAGE_START_SIZE) == 0); uint256 hash = GetMessageHash(); // store command string, payload size msg.m_command = hdr.GetCommand(); msg.m_message_size = hdr.nMessageSize; msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE; // We just received a message off the wire, harvest entropy from the time // (and the message checksum) RandAddEvent(ReadLE32(hash.begin())); msg.m_valid_checksum = (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) == 0); if (!msg.m_valid_checksum) { LogPrint( BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n", SanitizeString(msg.m_command), msg.m_message_size, HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE), HexStr(hdr.pchChecksum, hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE)); } // store receive time msg.m_time = time; // reset the network deserializer (prepare for the next message) Reset(); return msg; } void V1TransportSerializer::prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) { // create dbl-sha256 checksum - uint256 hash = Hash(msg.data.begin(), msg.data.end()); + uint256 hash = Hash(msg.data); // create header CMessageHeader hdr(config.GetChainParams().NetMagic(), msg.m_type.c_str(), msg.data.size()); memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); // serialize header header.reserve(CMessageHeader::HEADER_SIZE); CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr}; } size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_vSend) { size_t nSentSize = 0; size_t nMsgCount = 0; for (const auto &data : pnode->vSendMsg) { assert(data.size() > pnode->nSendOffset); int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { break; } nBytes = send(pnode->hSocket, reinterpret_cast(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); } if (nBytes == 0) { // couldn't send anything at all break; } if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { LogPrintf("socket send error %s\n", NetworkErrorString(nErr)); pnode->CloseSocketDisconnect(); } break; } assert(nBytes > 0); pnode->nLastSend = GetSystemTimeInSeconds(); pnode->nSendBytes += nBytes; pnode->nSendOffset += nBytes; nSentSize += nBytes; if (pnode->nSendOffset != data.size()) { // could not send full message; stop sending more break; } pnode->nSendOffset = 0; pnode->nSendSize -= data.size(); pnode->fPauseSend = pnode->nSendSize > nSendBufferMaxSize; nMsgCount++; } pnode->vSendMsg.erase(pnode->vSendMsg.begin(), pnode->vSendMsg.begin() + nMsgCount); if (pnode->vSendMsg.empty()) { assert(pnode->nSendOffset == 0); assert(pnode->nSendSize == 0); } return nSentSize; } struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; int64_t nMinPingUsecTime; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; bool fRelayTxes; bool fBloomFilter; CAddress addr; uint64_t nKeyedNetGroup; bool prefer_evict; }; static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nMinPingUsecTime > b.nMinPingUsecTime; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nTimeConnected > b.nTimeConnected; } static bool 
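V1TransportSerializer::prepareForTransport() above computes the 4-byte checksum carried in every P2P message header as the leading bytes of SHA256d(payload), now written as Hash(msg.data); V1TransportDeserializer recomputes the same hash on receipt and compares it against hdr.pchChecksum. A minimal sketch of the sending side, assuming the CMessageHeader layout used above:

    #include <cstring>
    #include <vector>
    // Assumes uint256, Hash() and CMessageHeader from this tree;
    // CMessageHeader::CHECKSUM_SIZE is 4.

    void FillChecksum(CMessageHeader &hdr, const std::vector<uint8_t> &payload) {
        const uint256 hash = Hash(payload); // double SHA-256 of the payload
        // Only the first CHECKSUM_SIZE bytes travel in the header.
        memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE);
    }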
CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nKeyedNetGroup < b.nKeyedNetGroup; } static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have many // peers which have not yet relayed a block. if (a.nLastBlockTime != b.nLastBlockTime) { return a.nLastBlockTime < b.nLastBlockTime; } if (a.fRelevantServices != b.fRelevantServices) { return b.fRelevantServices; } return a.nTimeConnected > b.nTimeConnected; } static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have more // than a few peers that have not yet relayed txn. if (a.nLastTXTime != b.nLastTXTime) { return a.nLastTXTime < b.nLastTXTime; } if (a.fRelayTxes != b.fRelayTxes) { return b.fRelayTxes; } if (a.fBloomFilter != b.fBloomFilter) { return a.fBloomFilter; } return a.nTimeConnected > b.nTimeConnected; } //! Sort an array by the specified comparator, then erase the last K elements. template static void EraseLastKElements(std::vector &elements, Comparator comparator, size_t k) { std::sort(elements.begin(), elements.end(), comparator); size_t eraseSize = std::min(k, elements.size()); elements.erase(elements.end() - eraseSize, elements.end()); } /** * Try to find a connection to evict when the node is full. * Extreme care must be taken to avoid opening the node to attacker triggered * network partitioning. * The strategy used here is to protect a small number of peers for each of * several distinct characteristics which are difficult to forge. In order to * partition a node the attacker must be simultaneously better at all of them * than honest peers. */ bool CConnman::AttemptToEvictConnection() { std::vector vEvictionCandidates; { LOCK(cs_vNodes); for (const CNode *node : vNodes) { if (node->HasPermission(PF_NOBAN)) { continue; } if (!node->IsInboundConn()) { continue; } if (node->fDisconnect) { continue; } bool peer_relay_txes = false; bool peer_filter_not_null = false; if (node->m_tx_relay != nullptr) { LOCK(node->m_tx_relay->cs_filter); peer_relay_txes = node->m_tx_relay->fRelayTxes; peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; } NodeEvictionCandidate candidate = { node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, node->nLastBlockTime, node->nLastTXTime, HasAllDesirableServiceFlags(node->nServices), peer_relay_txes, peer_filter_not_null, node->addr, node->nKeyedNetGroup, node->m_prefer_evict}; vEvictionCandidates.push_back(candidate); } } // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. // An attacker cannot predict which netgroups will be protected EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4); // Protect the 8 nodes with the lowest minimum ping time. // An attacker cannot manipulate this metric without physically moving nodes // closer to the target. EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8); // Protect 4 nodes that most recently sent us transactions. // An attacker cannot manipulate this metric without performing useful work. EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4); // Protect 4 nodes that most recently sent us blocks. // An attacker cannot manipulate this metric without performing useful work. 
EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4); // Protect the half of the remaining nodes which have been connected the // longest. This replicates the non-eviction implicit behavior, and // precludes attacks that start later. EraseLastKElements(vEvictionCandidates, ReverseCompareNodeTimeConnected, vEvictionCandidates.size() / 2); if (vEvictionCandidates.empty()) { return false; } // If any remaining peers are preferred for eviction consider only them. // This happens after the other preferences since if a peer is really the // best by other criteria (esp relaying blocks) // then we probably don't want to evict it no matter what. if (std::any_of( vEvictionCandidates.begin(), vEvictionCandidates.end(), [](NodeEvictionCandidate const &n) { return n.prefer_evict; })) { vEvictionCandidates.erase( std::remove_if( vEvictionCandidates.begin(), vEvictionCandidates.end(), [](NodeEvictionCandidate const &n) { return !n.prefer_evict; }), vEvictionCandidates.end()); } // Identify the network group with the most connections and youngest member. // (vEvictionCandidates is already sorted by reverse connect time) uint64_t naMostConnections; unsigned int nMostConnections = 0; int64_t nMostConnectionsTime = 0; std::map> mapNetGroupNodes; for (const NodeEvictionCandidate &node : vEvictionCandidates) { std::vector &group = mapNetGroupNodes[node.nKeyedNetGroup]; group.push_back(node); int64_t grouptime = group[0].nTimeConnected; size_t group_size = group.size(); if (group_size > nMostConnections || (group_size == nMostConnections && grouptime > nMostConnectionsTime)) { nMostConnections = group_size; nMostConnectionsTime = grouptime; naMostConnections = node.nKeyedNetGroup; } } // Reduce to the network group with the most connections vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]); // Disconnect from the network group with the most connections NodeId evicted = vEvictionCandidates.front().id; LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetId() == evicted) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::AcceptConnection(const ListenSocket &hListenSocket) { struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len); CAddress addr; int nInbound = 0; int nMaxInbound = nMaxConnections - m_max_outbound; if (hSocket != INVALID_SOCKET) { if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) { LogPrintf("Warning: Unknown socket family\n"); } } NetPermissionFlags permissionFlags = NetPermissionFlags::PF_NONE; hListenSocket.AddSocketPermissionFlags(permissionFlags); AddWhitelistPermissionFlags(permissionFlags, addr); bool legacyWhitelisted = false; if (NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_ISIMPLICIT)) { NetPermissions::ClearFlag(permissionFlags, PF_ISIMPLICIT); if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { NetPermissions::AddFlag(permissionFlags, PF_FORCERELAY); } if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) { NetPermissions::AddFlag(permissionFlags, PF_RELAY); } NetPermissions::AddFlag(permissionFlags, PF_MEMPOOL); NetPermissions::AddFlag(permissionFlags, PF_NOBAN); legacyWhitelisted = true; } { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->IsInboundConn()) { nInbound++; } } } if (hSocket == INVALID_SOCKET) { int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK) { LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr)); } return; } 
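AttemptToEvictConnection() above narrows the candidate list by repeatedly protecting the K peers that score best under one hard-to-forge metric at a time: EraseLastKElements() sorts so the protected peers land at the back, then erases them from the candidate vector. A standalone illustration of that sort-and-trim step (element type generalized here; the diff's version is fixed to NodeEvictionCandidate):

    #include <algorithm>
    #include <functional>
    #include <vector>

    template <typename T, typename Comparator>
    static void EraseLastKElements(std::vector<T> &elements,
                                   Comparator comparator, size_t k) {
        // Sort so the k elements to protect end up last, then drop them from
        // further eviction consideration.
        std::sort(elements.begin(), elements.end(), comparator);
        const size_t eraseSize = std::min(k, elements.size());
        elements.erase(elements.end() - eraseSize, elements.end());
    }

    // Example: protecting the two largest values removes them from the pool.
    //   std::vector<int> v{5, 1, 4, 2, 3};
    //   EraseLastKElements(v, std::less<int>(), 2); // v is now {1, 2, 3}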
if (!fNetworkActive) { LogPrintf("connection from %s dropped: not accepting new connections\n", addr.ToString()); CloseSocket(hSocket); return; } if (!IsSelectableSocket(hSocket)) { LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString()); CloseSocket(hSocket); return; } // According to the internet TCP_NODELAY is not carried into accepted // sockets on all platforms. Set it again here just to be sure. SetSocketNoDelay(hSocket); // Don't accept connections from banned peers. bool banned = m_banman && m_banman->IsBanned(addr); if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && banned) { LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToString()); CloseSocket(hSocket); return; } // Only accept connections from discouraged peers if our inbound slots // aren't (almost) full. bool discouraged = m_banman && m_banman->IsDiscouraged(addr); if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && nInbound + 1 >= nMaxInbound && discouraged) { LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToString()); CloseSocket(hSocket); return; } if (nInbound >= nMaxInbound) { if (!AttemptToEvictConnection()) { // No connection to evict, disconnect the new connection LogPrint(BCLog::NET, "failed to find an eviction candidate - " "connection dropped (full)\n"); CloseSocket(hSocket); return; } } NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); uint64_t extra_entropy = GetDeterministicRandomizer(RANDOMIZER_ID_EXTRAENTROPY) .Write(id) .Finalize(); CAddress addr_bind = GetBindAddress(hSocket); ServiceFlags nodeServices = nLocalServices; if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) { nodeServices = static_cast(nodeServices | NODE_BLOOM); } CNode *pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, extra_entropy, addr_bind, "", ConnectionType::INBOUND); pnode->AddRef(); pnode->m_permissionFlags = permissionFlags; // If this flag is present, the user probably expect that RPC and QT report // it as whitelisted (backward compatibility) pnode->m_legacyWhitelisted = legacyWhitelisted; pnode->m_prefer_evict = discouraged; m_msgproc->InitializeNode(*config, pnode); LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString()); { LOCK(cs_vNodes); vNodes.push_back(pnode); } // We received a new connection, harvest entropy from the time (and our peer // count) RandAddEvent(uint32_t(id)); } void CConnman::DisconnectNodes() { { LOCK(cs_vNodes); if (!fNetworkActive) { // Disconnect any connected nodes for (CNode *pnode : vNodes) { if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "Network not active, dropping peer=%d\n", pnode->GetId()); pnode->fDisconnect = true; } } } // Disconnect unused nodes std::vector vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { // remove from vNodes vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end()); // release outbound grant (if any) pnode->grantOutbound.Release(); // close socket and cleanup pnode->CloseSocketDisconnect(); // hold in disconnected pool until all refs are released pnode->Release(); vNodesDisconnected.push_back(pnode); } } } { // Delete disconnected nodes std::list vNodesDisconnectedCopy = vNodesDisconnected; for (CNode *pnode : vNodesDisconnectedCopy) { // wait until threads are done using it if (pnode->GetRefCount() <= 0) { bool fDelete = false; { 
TRY_LOCK(pnode->cs_inventory, lockInv); if (lockInv) { TRY_LOCK(pnode->cs_vSend, lockSend); if (lockSend) { fDelete = true; } } } if (fDelete) { vNodesDisconnected.remove(pnode); DeleteNode(pnode); } } } } } void CConnman::NotifyNumConnectionsChanged() { size_t vNodesSize; { LOCK(cs_vNodes); vNodesSize = vNodes.size(); } if (vNodesSize != nPrevNodeCount) { nPrevNodeCount = vNodesSize; if (clientInterface) { clientInterface->NotifyNumConnectionsChanged(vNodesSize); } } } void CConnman::InactivityCheck(CNode *pnode) { int64_t nTime = GetSystemTimeInSeconds(); if (nTime - pnode->nTimeConnected > m_peer_connect_timeout) { if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->GetId()); pnode->fDisconnect = true; } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) { LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend); pnode->fDisconnect = true; } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90 * 60)) { LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); pnode->fDisconnect = true; } else if (pnode->nPingNonceSent && pnode->m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime()) { LogPrintf("ping timeout: %fs\n", 0.000001 * count_microseconds( GetTime() - pnode->m_ping_start.load())); pnode->fDisconnect = true; } else if (!pnode->fSuccessfullyConnected) { LogPrint(BCLog::NET, "version handshake timeout from %d\n", pnode->GetId()); pnode->fDisconnect = true; } } } bool CConnman::GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set) { for (const ListenSocket &hListenSocket : vhListenSocket) { recv_set.insert(hListenSocket.socket); } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { // Implement the following logic: // * If there is data to send, select() for sending data. As this // only happens when optimistic write failed, we choose to first // drain the write buffer in this case before receiving more. This // avoids needlessly queueing received data, if the remote peer is // not themselves receiving data. This means properly utilizing // TCP flow control signalling. // * Otherwise, if there is space left in the receive buffer, // select() for receiving data. // * Hand off all complete messages to the processor, to be handled // without blocking here. 
bool select_recv = !pnode->fPauseRecv; bool select_send; { LOCK(pnode->cs_vSend); select_send = !pnode->vSendMsg.empty(); } LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } error_set.insert(pnode->hSocket); if (select_send) { send_set.insert(pnode->hSocket); continue; } if (select_recv) { recv_set.insert(pnode->hSocket); } } } return !recv_set.empty() || !send_set.empty() || !error_set.empty(); } #ifdef USE_POLL void CConnman::SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set) { std::set recv_select_set, send_select_set, error_select_set; if (!GenerateSelectSet(recv_select_set, send_select_set, error_select_set)) { interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); return; } std::unordered_map pollfds; for (SOCKET socket_id : recv_select_set) { pollfds[socket_id].fd = socket_id; pollfds[socket_id].events |= POLLIN; } for (SOCKET socket_id : send_select_set) { pollfds[socket_id].fd = socket_id; pollfds[socket_id].events |= POLLOUT; } for (SOCKET socket_id : error_select_set) { pollfds[socket_id].fd = socket_id; // These flags are ignored, but we set them for clarity pollfds[socket_id].events |= POLLERR | POLLHUP; } std::vector vpollfds; vpollfds.reserve(pollfds.size()); for (auto it : pollfds) { vpollfds.push_back(std::move(it.second)); } if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) < 0) { return; } if (interruptNet) { return; } for (struct pollfd pollfd_entry : vpollfds) { if (pollfd_entry.revents & POLLIN) { recv_set.insert(pollfd_entry.fd); } if (pollfd_entry.revents & POLLOUT) { send_set.insert(pollfd_entry.fd); } if (pollfd_entry.revents & (POLLERR | POLLHUP)) { error_set.insert(pollfd_entry.fd); } } } #else void CConnman::SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set) { std::set recv_select_set, send_select_set, error_select_set; if (!GenerateSelectSet(recv_select_set, send_select_set, error_select_set)) { interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); return; } // // Find which sockets have data to receive // struct timeval timeout; timeout.tv_sec = 0; // frequency to poll pnode->vSend timeout.tv_usec = SELECT_TIMEOUT_MILLISECONDS * 1000; fd_set fdsetRecv; fd_set fdsetSend; fd_set fdsetError; FD_ZERO(&fdsetRecv); FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); SOCKET hSocketMax = 0; for (SOCKET hSocket : recv_select_set) { FD_SET(hSocket, &fdsetRecv); hSocketMax = std::max(hSocketMax, hSocket); } for (SOCKET hSocket : send_select_set) { FD_SET(hSocket, &fdsetSend); hSocketMax = std::max(hSocketMax, hSocket); } for (SOCKET hSocket : error_select_set) { FD_SET(hSocket, &fdsetError); hSocketMax = std::max(hSocketMax, hSocket); } int nSelect = select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout); if (interruptNet) { return; } if (nSelect == SOCKET_ERROR) { int nErr = WSAGetLastError(); LogPrintf("socket select error %s\n", NetworkErrorString(nErr)); for (unsigned int i = 0; i <= hSocketMax; i++) { FD_SET(i, &fdsetRecv); } FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); if (!interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS))) { return; } } for (SOCKET hSocket : recv_select_set) { if (FD_ISSET(hSocket, &fdsetRecv)) { recv_set.insert(hSocket); } } for (SOCKET hSocket : send_select_set) { if (FD_ISSET(hSocket, &fdsetSend)) { send_set.insert(hSocket); } } for (SOCKET hSocket : error_select_set) { if (FD_ISSET(hSocket, &fdsetError)) { error_set.insert(hSocket); } } } #endif void 
CConnman::SocketHandler() { std::set recv_set, send_set, error_set; SocketEvents(recv_set, send_set, error_set); if (interruptNet) { return; } // // Accept new connections // for (const ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET && recv_set.count(hListenSocket.socket) > 0) { AcceptConnection(hListenSocket); } } // // Service each socket // std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } for (CNode *pnode : vNodesCopy) { if (interruptNet) { return; } // // Receive // bool recvSet = false; bool sendSet = false; bool errorSet = false; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } recvSet = recv_set.count(pnode->hSocket) > 0; sendSet = send_set.count(pnode->hSocket) > 0; errorSet = error_set.count(pnode->hSocket) > 0; } if (recvSet || errorSet) { // typical socket buffer is 8K-64K char pchBuf[0x10000]; int32_t nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT); } if (nBytes > 0) { bool notify = false; if (!pnode->ReceiveMsgBytes(*config, pchBuf, nBytes, notify)) { pnode->CloseSocketDisconnect(); } RecordBytesRecv(nBytes); if (notify) { size_t nSizeAdded = 0; auto it(pnode->vRecvMsg.begin()); for (; it != pnode->vRecvMsg.end(); ++it) { // vRecvMsg contains only completed CNetMessage // the single possible partially deserialized message // are held by TransportDeserializer nSizeAdded += it->m_raw_message_size; } { LOCK(pnode->cs_vProcessMsg); pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it); pnode->nProcessQueueSize += nSizeAdded; pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize; } WakeMessageHandler(); } } else if (nBytes == 0) { // socket closed gracefully if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "socket closed for peer=%d\n", pnode->GetId()); } pnode->CloseSocketDisconnect(); } else if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "socket recv error for peer=%d: %s\n", pnode->GetId(), NetworkErrorString(nErr)); } pnode->CloseSocketDisconnect(); } } } // // Send // if (sendSet) { LOCK(pnode->cs_vSend); size_t nBytes = SocketSendData(pnode); if (nBytes) { RecordBytesSent(nBytes); } } InactivityCheck(pnode); } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } } void CConnman::ThreadSocketHandler() { while (!interruptNet) { DisconnectNodes(); NotifyNumConnectionsChanged(); SocketHandler(); } } void CConnman::WakeMessageHandler() { { LOCK(mutexMsgProc); fMsgProcWake = true; } condMsgProc.notify_one(); } #ifdef USE_UPNP static CThreadInterrupt g_upnp_interrupt; static std::thread g_upnp_thread; static void ThreadMapPort() { std::string port = strprintf("%u", GetListenPort()); const char *multicastif = nullptr; const char *minissdpdpath = nullptr; struct UPNPDev *devlist = nullptr; char lanaddr[64]; int error = 0; #if MINIUPNPC_API_VERSION < 14 devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error); #else devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error); #endif struct UPNPUrls urls; struct IGDdatas data; int r; r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr)); if (r == 1) { if (fDiscover) { char externalIPAddress[40]; r = 
UPNP_GetExternalIPAddress( urls.controlURL, data.first.servicetype, externalIPAddress); if (r != UPNPCOMMAND_SUCCESS) { LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r); } else { if (externalIPAddress[0]) { CNetAddr resolved; if (LookupHost(externalIPAddress, resolved, false)) { LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString()); AddLocal(resolved, LOCAL_UPNP); } } else { LogPrintf("UPnP: GetExternalIPAddress failed.\n"); } } } std::string strDesc = PACKAGE_NAME " " + FormatFullVersion(); do { r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0"); if (r != UPNPCOMMAND_SUCCESS) { LogPrintf( "AddPortMapping(%s, %s, %s) failed with code %d (%s)\n", port, port, lanaddr, r, strupnperror(r)); } else { LogPrintf("UPnP Port Mapping successful.\n"); } } while (g_upnp_interrupt.sleep_for(std::chrono::minutes(20))); r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0); LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r); freeUPNPDevlist(devlist); devlist = nullptr; FreeUPNPUrls(&urls); } else { LogPrintf("No valid UPnP IGDs found\n"); freeUPNPDevlist(devlist); devlist = nullptr; if (r != 0) { FreeUPNPUrls(&urls); } } } void StartMapPort() { if (!g_upnp_thread.joinable()) { assert(!g_upnp_interrupt); g_upnp_thread = std::thread( (std::bind(&TraceThread, "upnp", &ThreadMapPort))); } } void InterruptMapPort() { if (g_upnp_thread.joinable()) { g_upnp_interrupt(); } } void StopMapPort() { if (g_upnp_thread.joinable()) { g_upnp_thread.join(); g_upnp_interrupt.reset(); } } #else void StartMapPort() { // Intentionally left blank. } void InterruptMapPort() { // Intentionally left blank. } void StopMapPort() { // Intentionally left blank. } #endif void CConnman::ThreadDNSAddressSeed() { FastRandomContext rng; std::vector seeds = config->GetChainParams().DNSSeeds(); Shuffle(seeds.begin(), seeds.end(), rng); // Number of seeds left before testing if we have enough connections int seeds_right_now = 0; int found = 0; if (gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED)) { // When -forcednsseed is provided, query all. seeds_right_now = seeds.size(); } else if (addrman.size() == 0) { // If we have no known peers, query all. // This will occur on the first run, or if peers.dat has been // deleted. seeds_right_now = seeds.size(); } // goal: only query DNS seed if address need is acute // * If we have a reasonable number of peers in addrman, spend // some time trying them first. This improves user privacy by // creating fewer identifying DNS requests, reduces trust by // giving seeds less influence on the network topology, and // reduces traffic to the seeds. // * When querying DNS seeds query a few at once, this ensures // that we don't give DNS seeds the ability to eclipse nodes // that query them. // * If we continue having problems, eventually query all the // DNS seeds, and if that fails too, also try the fixed seeds. // (done in ThreadOpenConnections) const std::chrono::seconds seeds_wait_time = (addrman.size() >= DNSSEEDS_DELAY_PEER_THRESHOLD ? 
DNSSEEDS_DELAY_MANY_PEERS : DNSSEEDS_DELAY_FEW_PEERS); for (const std::string &seed : seeds) { if (seeds_right_now == 0) { seeds_right_now += DNSSEEDS_TO_QUERY_AT_ONCE; if (addrman.size() > 0) { LogPrintf("Waiting %d seconds before querying DNS seeds.\n", seeds_wait_time.count()); std::chrono::seconds to_wait = seeds_wait_time; while (to_wait.count() > 0) { // if sleeping for the MANY_PEERS interval, wake up // early to see if we have enough peers and can stop // this thread entirely freeing up its resources std::chrono::seconds w = std::min(DNSSEEDS_DELAY_FEW_PEERS, to_wait); if (!interruptNet.sleep_for(w)) { return; } to_wait -= w; int nRelevant = 0; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->fSuccessfullyConnected && pnode->IsOutboundOrBlockRelayConn()) { ++nRelevant; } } } if (nRelevant >= 2) { if (found > 0) { LogPrintf("%d addresses found from DNS seeds\n", found); LogPrintf( "P2P peers available. Finished DNS seeding.\n"); } else { LogPrintf( "P2P peers available. Skipped DNS seeding.\n"); } return; } } } } if (interruptNet) { return; } // hold off on querying seeds if P2P network deactivated if (!fNetworkActive) { LogPrintf("Waiting for network to be reactivated before querying " "DNS seeds.\n"); do { if (!interruptNet.sleep_for(std::chrono::seconds{1})) { return; } } while (!fNetworkActive); } LogPrintf("Loading addresses from DNS seed %s\n", seed); if (HaveNameProxy()) { AddAddrFetch(seed); } else { std::vector vIPs; std::vector vAdd; ServiceFlags requiredServiceBits = GetDesirableServiceFlags(NODE_NONE); std::string host = strprintf("x%x.%s", requiredServiceBits, seed); CNetAddr resolveSource; if (!resolveSource.SetInternal(host)) { continue; } // Limits number of IPs learned from a DNS seed unsigned int nMaxIPs = 256; if (LookupHost(host, vIPs, nMaxIPs, true)) { for (const CNetAddr &ip : vIPs) { int nOneDay = 24 * 3600; CAddress addr = CAddress( CService(ip, config->GetChainParams().GetDefaultPort()), requiredServiceBits); // Use a random age between 3 and 7 days old. addr.nTime = GetTime() - 3 * nOneDay - rng.randrange(4 * nOneDay); vAdd.push_back(addr); found++; } addrman.Add(vAdd, resolveSource); } else { // We now avoid directly using results from DNS Seeds which do // not support service bit filtering, instead using them as a // addrfetch to get nodes with our desired service bits. AddAddrFetch(seed); } } --seeds_right_now; } LogPrintf("%d addresses found from DNS seeds\n", found); } void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); CAddrDB adb(config->GetChainParams()); adb.Write(addrman); LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } void CConnman::ProcessAddrFetch() { std::string strDest; { LOCK(m_addr_fetches_mutex); if (m_addr_fetches.empty()) { return; } strDest = m_addr_fetches.front(); m_addr_fetches.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH); } } bool CConnman::GetTryNewOutboundPeer() { return m_try_another_outbound_peer; } void CConnman::SetTryNewOutboundPeer(bool flag) { m_try_another_outbound_peer = flag; LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n", flag ? "true" : "false"); } // Return the number of peers we have over our outbound connection limit. // Exclude peers that are marked for disconnect, or are going to be disconnected // soon (eg one-shots and feelers). 
// Also exclude peers that haven't finished initial connection handshake yet (so // that we don't decide we're over our desired connection limit, and then evict // some peer that has finished the handshake). int CConnman::GetExtraOutboundCount() { int nOutbound = 0; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) { ++nOutbound; } } } return std::max( nOutbound - m_max_outbound_full_relay - m_max_outbound_block_relay, 0); } void CConnman::ThreadOpenConnections(const std::vector connect) { // Connect to specific addresses if (!connect.empty()) { for (int64_t nLoop = 0;; nLoop++) { ProcessAddrFetch(); for (const std::string &strAddr : connect) { CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for( std::chrono::milliseconds(500))) { return; } } } if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Initiate network connections int64_t nStart = GetTime(); // Minimum time before next feeler connection (in microseconds). int64_t nNextFeeler = PoissonNextSend(nStart * 1000 * 1000, FEELER_INTERVAL); while (!interruptNet) { ProcessAddrFetch(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } CSemaphoreGrant grant(*semOutbound); if (interruptNet) { return; } // Add seed nodes if DNS seeds are all down (an infrastructure attack?). // Note that we only do this if we started with an empty peers.dat, // (in which case we will query DNS seeds immediately) *and* the DNS // seeds have not returned any results. if (addrman.size() == 0 && (GetTime() - nStart > 60)) { static bool done = false; if (!done) { LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be " "available.\n"); CNetAddr local; local.SetInternal("fixedseeds"); addrman.Add(convertSeed6(config->GetChainParams().FixedSeeds()), local); done = true; } } // // Choose an address to connect to based on most recently seen // CAddress addrConnect; // Only connect out to one peer per network group (/16 for IPv4). int nOutboundFullRelay = 0; int nOutboundBlockRelay = 0; std::set> setConnected; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->IsFullOutboundConn()) { nOutboundFullRelay++; } if (pnode->IsBlockOnlyConn()) { nOutboundBlockRelay++; } // Netgroups for inbound and manual peers are not excluded // because our goal here is to not use multiple of our // limited outbound slots on a single netgroup but inbound // and manual peers do not use our outbound slots. Inbound // peers also have the added issue that they could be attacker // controlled and could be used to prevent us from connecting // to particular hosts if we used them here. switch (pnode->m_conn_type) { case ConnectionType::INBOUND: case ConnectionType::MANUAL: break; case ConnectionType::OUTBOUND: case ConnectionType::BLOCK_RELAY: case ConnectionType::ADDR_FETCH: case ConnectionType::FEELER: setConnected.insert( pnode->addr.GetGroup(addrman.m_asmap)); } } } // Feeler Connections // // Design goals: // * Increase the number of connectable addresses in the tried table. // // Method: // * Choose a random address from new and attempt to connect to it if // we can connect successfully it is added to tried. // * Start attempting feeler connections only after node finishes // making outbound connections. // * Only make a feeler connection once every few minutes. 
// bool fFeeler = false; if (nOutboundFullRelay >= m_max_outbound_full_relay && nOutboundBlockRelay >= m_max_outbound_block_relay && !GetTryNewOutboundPeer()) { // The current time right now (in microseconds). int64_t nTime = GetTimeMicros(); if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); fFeeler = true; } else { continue; } } addrman.ResolveCollisions(); int64_t nANow = GetAdjustedTime(); int nTries = 0; while (!interruptNet) { CAddrInfo addr = addrman.SelectTriedCollision(); // SelectTriedCollision returns an invalid address if it is empty. if (!fFeeler || !addr.IsValid()) { addr = addrman.Select(fFeeler); } // Require outbound connections, other than feelers, to be to // distinct network groups if (!fFeeler && setConnected.count(addr.GetGroup(addrman.m_asmap))) { break; } // if we selected an invalid or local address, restart if (!addr.IsValid() || IsLocal(addr)) { break; } // If we didn't find an appropriate destination after trying 100 // addresses fetched from addrman, stop this loop, and let the outer // loop run again (which sleeps, adds seed nodes, recalculates // already-connected network ranges, ...) before trying new addrman // addresses. nTries++; if (nTries > 100) { break; } if (!IsReachable(addr)) { continue; } // only consider very recently tried nodes after 30 failed attempts if (nANow - addr.nLastTry < 600 && nTries < 30) { continue; } // for non-feelers, require all the services we'll want, // for feelers, only require they be a full node (only because most // SPV clients don't have a good address DB available) if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) { continue; } if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) { continue; } // do not allow non-default ports, unless after 50 invalid addresses // selected already. if (addr.GetPort() != config->GetChainParams().GetDefaultPort() && nTries < 50) { continue; } addrConnect = addr; break; } if (addrConnect.IsValid()) { if (fFeeler) { // Add small amount of random noise before connection to avoid // synchronization. int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000); if (!interruptNet.sleep_for( std::chrono::milliseconds(randsleep))) { return; } LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } ConnectionType conn_type; // Determine what type of connection to open. If fFeeler is not // set, open OUTBOUND connections until we meet our full-relay // capacity. Then open BLOCK_RELAY connections until we hit our // block-relay peer limit. Otherwise, default to opening an // OUTBOUND connection. 
if (fFeeler) { conn_type = ConnectionType::FEELER; } else if (nOutboundFullRelay < m_max_outbound_full_relay) { conn_type = ConnectionType::OUTBOUND; } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { conn_type = ConnectionType::BLOCK_RELAY; } else { // GetTryNewOutboundPeer() is true conn_type = ConnectionType::OUTBOUND; } OpenNetworkConnection(addrConnect, int(setConnected.size()) >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type); } } } std::vector CConnman::GetAddedNodeInfo() { std::vector ret; std::list lAddresses(0); { LOCK(cs_vAddedNodes); ret.reserve(vAddedNodes.size()); std::copy(vAddedNodes.cbegin(), vAddedNodes.cend(), std::back_inserter(lAddresses)); } // Build a map of all already connected addresses (by IP:port and by name) // to inbound/outbound and resolved CService std::map mapConnected; std::map> mapConnectedByName; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->addr.IsValid()) { mapConnected[pnode->addr] = pnode->IsInboundConn(); } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast(pnode->addr)); } } } for (const std::string &strAddNode : lAddresses) { CService service(LookupNumeric(strAddNode, Params().GetDefaultPort())); AddedNodeInfo addedNode{strAddNode, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); if (it != mapConnected.end()) { addedNode.resolvedAddress = service; addedNode.fConnected = true; addedNode.fInbound = it->second; } } else { // strAddNode is a name auto it = mapConnectedByName.find(strAddNode); if (it != mapConnectedByName.end()) { addedNode.resolvedAddress = it->second.second; addedNode.fConnected = true; addedNode.fInbound = it->second.first; } } ret.emplace_back(std::move(addedNode)); } return ret; } void CConnman::ThreadOpenAddedConnections() { while (true) { CSemaphoreGrant grant(*semAddnode); std::vector vInfo = GetAddedNodeInfo(); bool tried = false; for (const AddedNodeInfo &info : vInfo) { if (!info.fConnected) { if (!grant.TryAcquire()) { // If we've used up our semaphore and need a new one, let's // not wait here since while we are waiting the // addednodeinfo state might change. break; } tried = true; CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Retry every 60 seconds if a connection was attempted, otherwise two // seconds. if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) { return; } } } // If successful, this moves the passed grant to the constructed node. 
void CConnman::OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type) { assert(conn_type != ConnectionType::INBOUND); // // Initiate outbound network connection // if (interruptNet) { return; } if (!fNetworkActive) { return; } if (!pszDest) { bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect)); if (IsLocal(addrConnect) || FindNode(static_cast(addrConnect)) || banned_or_discouraged || FindNode(addrConnect.ToStringIPPort())) { return; } } else if (FindNode(std::string(pszDest))) { return; } CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type); if (!pnode) { return; } if (grantOutbound) { grantOutbound->MoveTo(pnode->grantOutbound); } m_msgproc->InitializeNode(*config, pnode); { LOCK(cs_vNodes); vNodes.push_back(pnode); } } void CConnman::ThreadMessageHandler() { while (!flagInterruptMsgProc) { std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } bool fMoreWork = false; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { continue; } // Receive messages bool fMoreNodeWork = m_msgproc->ProcessMessages( *config, pnode, flagInterruptMsgProc); fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend); if (flagInterruptMsgProc) { return; } // Send messages { LOCK(pnode->cs_sendProcessing); m_msgproc->SendMessages(*config, pnode, flagInterruptMsgProc); } if (flagInterruptMsgProc) { return; } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } WAIT_LOCK(mutexMsgProc, lock); if (!fMoreWork) { condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), [this]() EXCLUSIVE_LOCKS_REQUIRED( mutexMsgProc) { return fMsgProcWake; }); } fMsgProcWake = false; } } bool CConnman::BindListenPort(const CService &addrBind, bilingual_str &strError, NetPermissionFlags permissions) { int nOne = 1; // Create socket for listening for incoming connections struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if (!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) { strError = strprintf( Untranslated("Error: Bind address family for %s not supported"), addrBind.ToString()); LogPrintf("%s\n", strError.original); return false; } SOCKET hListenSocket = CreateSocket(addrBind); if (hListenSocket == INVALID_SOCKET) { strError = strprintf(Untranslated("Error: Couldn't open socket for incoming " "connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); return false; } // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); // Some systems don't have IPV6_V6ONLY but are always v6only; others do have // the option and enable it by default or not. Try to enable it, if // possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (sockopt_arg_type)&nProtLevel, sizeof(int)); #endif } if (::bind(hListenSocket, (struct sockaddr *)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) { strError = strprintf(_("Unable to bind to %s on this computer. 
%s " "is probably already running."), addrBind.ToString(), PACKAGE_NAME); } else { strError = strprintf(_("Unable to bind to %s on this computer " "(bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr)); } LogPrintf("%s\n", strError.original); CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections " "failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); CloseSocket(hListenSocket); return false; } vhListenSocket.push_back(ListenSocket(hListenSocket, permissions)); if (addrBind.IsRoutable() && fDiscover && (permissions & PF_NOBAN) == 0) { AddLocal(addrBind, LOCAL_BIND); } return true; } void Discover() { if (!fDiscover) { return; } #ifdef WIN32 // Get local host IP char pszHostName[256] = ""; if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) { std::vector vaddr; if (LookupHost(pszHostName, vaddr, 0, true)) { for (const CNetAddr &addr : vaddr) { if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString()); } } } } #elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS) // Get local host ip struct ifaddrs *myaddrs; if (getifaddrs(&myaddrs) == 0) { for (struct ifaddrs *ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next) { if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 || strcmp(ifa->ifa_name, "lo") == 0 || strcmp(ifa->ifa_name, "lo0") == 0) { continue; } if (ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in *s4 = reinterpret_cast(ifa->ifa_addr); CNetAddr addr(s4->sin_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } else if (ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6 *s6 = reinterpret_cast(ifa->ifa_addr); CNetAddr addr(s6->sin6_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } } freeifaddrs(myaddrs); } #endif } void CConnman::SetNetworkActive(bool active) { LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); if (fNetworkActive == active) { return; } fNetworkActive = active; uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In) : config(&configIn), nSeed0(nSeed0In), nSeed1(nSeed1In) { SetTryNewOutboundPeer(false); Options connOptions; Init(connOptions); } NodeId CConnman::GetNewNodeId() { return nLastNodeId.fetch_add(1); } bool CConnman::Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions) { if (!(flags & BF_EXPLICIT) && !IsReachable(addr)) { return false; } bilingual_str strError; if (!BindListenPort(addr, strError, permissions)) { if ((flags & BF_REPORT_ERROR) && clientInterface) { clientInterface->ThreadSafeMessageBox( strError, "", CClientUIInterface::MSG_ERROR); } return false; } return true; } bool CConnman::InitBinds( const std::vector &binds, const std::vector &whiteBinds) { bool fBound = false; for (const auto &addrBind : binds) { fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR), NetPermissionFlags::PF_NONE); } for (const auto &addrBind : whiteBinds) { fBound |= Bind(addrBind.m_service, (BF_EXPLICIT | BF_REPORT_ERROR), addrBind.m_flags); } if (binds.empty() && whiteBinds.empty()) { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; struct in6_addr inaddr6_any = 
IN6ADDR_ANY_INIT; fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::PF_NONE); fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::PF_NONE); } return fBound; } bool CConnman::Start(CScheduler &scheduler, const Options &connOptions) { Init(connOptions); { LOCK(cs_totalBytesRecv); nTotalBytesRecv = 0; } { LOCK(cs_totalBytesSent); nTotalBytesSent = 0; nMaxOutboundTotalBytesSentInCycle = 0; nMaxOutboundCycleStartTime = 0; } if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds)) { if (clientInterface) { clientInterface->ThreadSafeMessageBox( _("Failed to listen on any port. Use -listen=0 if you want " "this."), "", CClientUIInterface::MSG_ERROR); } return false; } for (const auto &strDest : connOptions.vSeedNodes) { AddAddrFetch(strDest); } if (clientInterface) { clientInterface->InitMessage(_("Loading P2P addresses...").translated); } // Load addresses from peers.dat int64_t nStart = GetTimeMillis(); { CAddrDB adb(config->GetChainParams()); if (adb.Read(addrman)) { LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } else { // Addrman can be in an inconsistent state after failure, reset it addrman.Clear(); LogPrintf("Invalid or missing peers.dat; recreating\n"); DumpAddresses(); } } uiInterface.InitMessage(_("Starting network threads...").translated); fAddressesInitialized = true; if (semOutbound == nullptr) { // initialize semaphore semOutbound = std::make_unique( std::min(m_max_outbound, nMaxConnections)); } if (semAddnode == nullptr) { // initialize semaphore semAddnode = std::make_unique(nMaxAddnode); } // // Start threads // assert(m_msgproc); InterruptSocks5(false); interruptNet.reset(); flagInterruptMsgProc = false; { LOCK(mutexMsgProc); fMsgProcWake = false; } // Send and receive from sockets, accept connections threadSocketHandler = std::thread( &TraceThread>, "net", std::function(std::bind(&CConnman::ThreadSocketHandler, this))); if (!gArgs.GetBoolArg("-dnsseed", true)) { LogPrintf("DNS seeding disabled\n"); } else { threadDNSAddressSeed = std::thread(&TraceThread>, "dnsseed", std::function( std::bind(&CConnman::ThreadDNSAddressSeed, this))); } // Initiate manual connections threadOpenAddedConnections = std::thread(&TraceThread>, "addcon", std::function(std::bind( &CConnman::ThreadOpenAddedConnections, this))); if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) { if (clientInterface) { clientInterface->ThreadSafeMessageBox( _("Cannot provide specific connections and have addrman find " "outgoing connections at the same."), "", CClientUIInterface::MSG_ERROR); } return false; } if (connOptions.m_use_addrman_outgoing || !connOptions.m_specified_outgoing.empty()) { threadOpenConnections = std::thread(&TraceThread>, "opencon", std::function( std::bind(&CConnman::ThreadOpenConnections, this, connOptions.m_specified_outgoing))); } // Process messages threadMessageHandler = std::thread(&TraceThread>, "msghand", std::function( std::bind(&CConnman::ThreadMessageHandler, this))); // Dump network addresses scheduler.scheduleEvery( [this]() { this->DumpAddresses(); return true; }, DUMP_PEERS_INTERVAL); return true; } class CNetCleanup { public: CNetCleanup() {} ~CNetCleanup() { #ifdef WIN32 // Shutdown Windows Sockets WSACleanup(); #endif } }; static CNetCleanup instance_of_cnetcleanup; void CConnman::Interrupt() { { LOCK(mutexMsgProc); flagInterruptMsgProc = true; } condMsgProc.notify_all(); interruptNet(); 
InterruptSocks5(true); if (semOutbound) { for (int i = 0; i < m_max_outbound; i++) { semOutbound->post(); } } if (semAddnode) { for (int i = 0; i < nMaxAddnode; i++) { semAddnode->post(); } } } void CConnman::StopThreads() { if (threadMessageHandler.joinable()) { threadMessageHandler.join(); } if (threadOpenConnections.joinable()) { threadOpenConnections.join(); } if (threadOpenAddedConnections.joinable()) { threadOpenAddedConnections.join(); } if (threadDNSAddressSeed.joinable()) { threadDNSAddressSeed.join(); } if (threadSocketHandler.joinable()) { threadSocketHandler.join(); } } void CConnman::StopNodes() { if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; } // Close sockets LOCK(cs_vNodes); for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } for (ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET) { if (!CloseSocket(hListenSocket.socket)) { LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError())); } } } // clean up some globals (to help leak detection) for (CNode *pnode : vNodes) { DeleteNode(pnode); } for (CNode *pnode : vNodesDisconnected) { DeleteNode(pnode); } vNodes.clear(); vNodesDisconnected.clear(); vhListenSocket.clear(); semOutbound.reset(); semAddnode.reset(); } void CConnman::DeleteNode(CNode *pnode) { assert(pnode); bool fUpdateConnectionTime = false; m_msgproc->FinalizeNode(*config, pnode->GetId(), fUpdateConnectionTime); if (fUpdateConnectionTime) { addrman.Connected(pnode->addr); } delete pnode; } CConnman::~CConnman() { Interrupt(); Stop(); } void CConnman::SetServices(const CService &addr, ServiceFlags nServices) { addrman.SetServices(addr, nServices); } void CConnman::MarkAddressGood(const CAddress &addr) { addrman.Good(addr); } void CConnman::AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(vAddr, addrFrom, nTimePenalty); } std::vector CConnman::GetAddresses() { return addrman.GetAddr(); } bool CConnman::AddNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (const std::string &it : vAddedNodes) { if (strNode == it) { return false; } } vAddedNodes.push_back(strNode); return true; } bool CConnman::RemoveAddedNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { vAddedNodes.erase(it); return true; } } return false; } size_t CConnman::GetNodeCount(NumConnections flags) { LOCK(cs_vNodes); // Shortcut if we want total if (flags == CConnman::CONNECTIONS_ALL) { return vNodes.size(); } int nNum = 0; for (const auto &pnode : vNodes) { if (flags & (pnode->IsInboundConn() ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } return nNum; } void CConnman::GetNodeStats(std::vector &vstats) { vstats.clear(); LOCK(cs_vNodes); vstats.reserve(vNodes.size()); for (CNode *pnode : vNodes) { vstats.emplace_back(); pnode->copyStats(vstats.back(), addrman.m_asmap); } } bool CConnman::DisconnectNode(const std::string &strNode) { LOCK(cs_vNodes); if (CNode *pnode = FindNode(strNode)) { pnode->fDisconnect = true; return true; } return false; } bool CConnman::DisconnectNode(const CSubNet &subnet) { bool disconnected = false; LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subnet.Match(pnode->addr)) { pnode->fDisconnect = true; disconnected = true; } } return disconnected; } bool CConnman::DisconnectNode(const CNetAddr &addr) { return DisconnectNode(CSubNet(addr)); } bool CConnman::DisconnectNode(NodeId id) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (id == pnode->GetId()) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::RecordBytesRecv(uint64_t bytes) { LOCK(cs_totalBytesRecv); nTotalBytesRecv += bytes; } void CConnman::RecordBytesSent(uint64_t bytes) { LOCK(cs_totalBytesSent); nTotalBytesSent += bytes; uint64_t now = GetTime(); if (nMaxOutboundCycleStartTime + nMaxOutboundTimeframe < now) { // timeframe expired, reset cycle nMaxOutboundCycleStartTime = now; nMaxOutboundTotalBytesSentInCycle = 0; } // TODO, exclude whitebind peers nMaxOutboundTotalBytesSentInCycle += bytes; } void CConnman::SetMaxOutboundTarget(uint64_t limit) { LOCK(cs_totalBytesSent); nMaxOutboundLimit = limit; } uint64_t CConnman::GetMaxOutboundTarget() { LOCK(cs_totalBytesSent); return nMaxOutboundLimit; } uint64_t CConnman::GetMaxOutboundTimeframe() { LOCK(cs_totalBytesSent); return nMaxOutboundTimeframe; } uint64_t CConnman::GetMaxOutboundTimeLeftInCycle() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } if (nMaxOutboundCycleStartTime == 0) { return nMaxOutboundTimeframe; } uint64_t cycleEndTime = nMaxOutboundCycleStartTime + nMaxOutboundTimeframe; uint64_t now = GetTime(); return (cycleEndTime < now) ? 0 : cycleEndTime - GetTime(); } void CConnman::SetMaxOutboundTimeframe(uint64_t timeframe) { LOCK(cs_totalBytesSent); if (nMaxOutboundTimeframe != timeframe) { // reset measure-cycle in case of changing the timeframe. nMaxOutboundCycleStartTime = GetTime(); } nMaxOutboundTimeframe = timeframe; } bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return false; } if (historicalBlockServingLimit) { // keep a large enough buffer to at least relay each block once. uint64_t timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); uint64_t buffer = timeLeftInCycle / 600 * ONE_MEGABYTE; if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) { return true; } } else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) { return true; } return false; } uint64_t CConnman::GetOutboundTargetBytesLeft() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 
0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle; } uint64_t CConnman::GetTotalBytesRecv() { LOCK(cs_totalBytesRecv); return nTotalBytesRecv; } uint64_t CConnman::GetTotalBytesSent() { LOCK(cs_totalBytesSent); return nTotalBytesSent; } ServiceFlags CConnman::GetLocalServices() const { return nLocalServices; } void CConnman::SetBestHeight(int height) { nBestHeight.store(height, std::memory_order_release); } int CConnman::GetBestHeight() const { return nBestHeight.load(std::memory_order_acquire); } unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, uint64_t nLocalExtraEntropyIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), nKeyedNetGroup(nKeyedNetGroupIn), // Don't relay addr messages to peers that we connect to as // block-relay-only peers (to prevent adversaries from inferring these // links from addr traffic). id(idIn), nLocalHostNonce(nLocalHostNonceIn), nLocalExtraEntropy(nLocalExtraEntropyIn), m_conn_type(conn_type_in), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn) { hSocket = hSocketIn; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; hashContinue = BlockHash(); if (conn_type_in != ConnectionType::BLOCK_RELAY) { m_tx_relay = std::make_unique(); m_addr_known = std::make_unique(5000, 0.001); } for (const std::string &msg : getAllNetMessageTypes()) { mapRecvBytesPerMsgCmd[msg] = 0; } mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0; if (fLogIPs) { LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id); } else { LogPrint(BCLog::NET, "Added connection peer=%d\n", id); } m_deserializer = std::make_unique( V1TransportDeserializer(GetConfig().GetChainParams().NetMagic(), SER_NETWORK, INIT_PROTO_VERSION)); m_serializer = std::make_unique(V1TransportSerializer()); } CNode::~CNode() { CloseSocket(hSocket); } bool CConnman::NodeFullyConnected(const CNode *pnode) { return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect; } void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) { size_t nMessageSize = msg.data.size(); LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.m_type), nMessageSize, pnode->GetId()); // make sure we use the appropriate network transport format std::vector serializedHeader; pnode->m_serializer->prepareForTransport(*config, msg, serializedHeader); size_t nTotalSize = nMessageSize + serializedHeader.size(); size_t nBytesSent = 0; { LOCK(pnode->cs_vSend); bool optimisticSend(pnode->vSendMsg.empty()); // log total amount of bytes per message type pnode->mapSendBytesPerMsgCmd[msg.m_type] += nTotalSize; pnode->nSendSize += nTotalSize; if (pnode->nSendSize > nSendBufferMaxSize) { pnode->fPauseSend = true; } pnode->vSendMsg.push_back(std::move(serializedHeader)); if (nMessageSize) { pnode->vSendMsg.push_back(std::move(msg.data)); } // If write queue empty, attempt "optimistic write" if (optimisticSend == true) { nBytesSent = SocketSendData(pnode); } } if (nBytesSent) { RecordBytesSent(nBytesSent); } } bool CConnman::ForNode(NodeId id, std::function func) { CNode *found = nullptr; LOCK(cs_vNodes); for (auto &&pnode : vNodes) { if (pnode->GetId() == id) { found = pnode; break; } } return found != nullptr && NodeFullyConnected(found) && 
func(found); } int64_t CConnman::PoissonNextSendInbound(int64_t now, int average_interval_seconds) { if (m_next_send_inv_to_incoming < now) { // If this function were called from multiple threads simultaneously // it would be possible that both update the next send variable, and // return a different result to their caller. This is not possible in // practice as only the net processing thread invokes this function. m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval_seconds); } return m_next_send_inv_to_incoming; } int64_t PoissonNextSend(int64_t now, int average_interval_seconds) { return now + int64_t(log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */) * average_interval_seconds * -1000000.0 + 0.5); } CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const { return CSipHasher(nSeed0, nSeed1).Write(id); } uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const { std::vector vchNetGroup(ad.GetGroup(addrman.m_asmap)); return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP) .Write(vchNetGroup.data(), vchNetGroup.size()) .Finalize(); } /** * This function convert MaxBlockSize from byte to * MB with a decimal precision one digit rounded down * E.g. * 1660000 -> 1.6 * 2010000 -> 2.0 * 1000000 -> 1.0 * 230000 -> 0.2 * 50000 -> 0.0 * * NB behavior for EB<1MB not standardized yet still * the function applies the same algo used for * EB greater or equal to 1MB */ std::string getSubVersionEB(uint64_t MaxBlockSize) { // Prepare EB string we are going to add to SubVer: // 1) translate from byte to MB and convert to string // 2) limit the EB string to the first decimal digit (floored) std::stringstream ebMBs; ebMBs << (MaxBlockSize / (ONE_MEGABYTE / 10)); std::string eb = ebMBs.str(); eb.insert(eb.size() - 1, ".", 1); if (eb.substr(0, 1) == ".") { eb = "0" + eb; } return eb; } std::string userAgent(const Config &config) { // format excessive blocksize value std::string eb = getSubVersionEB(config.GetMaxBlockSize()); std::vector uacomments; uacomments.push_back("EB" + eb); // Comments are checked for char compliance at startup, it is safe to add // them to the user agent string for (const std::string &cmt : gArgs.GetArgs("-uacomment")) { uacomments.push_back(cmt); } // Size compliance is checked at startup, it is safe to not check it again std::string subversion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments); return subversion; } diff --git a/src/netaddress.cpp b/src/netaddress.cpp index 0fe39d395..f3f4cf6f4 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -1,947 +1,947 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include static const uint8_t pchIPv4[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}; static const uint8_t pchOnionCat[] = {0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43}; // 0xFD + sha256("bitcoin")[0:5] static const uint8_t g_internal_prefix[] = {0xFD, 0x6B, 0x88, 0xC0, 0x87, 0x24}; /** * Construct an unspecified IPv6 network address (::/128). 
* * @note This address is considered invalid by CNetAddr::IsValid() */ CNetAddr::CNetAddr() { memset(ip, 0, sizeof(ip)); } void CNetAddr::SetIP(const CNetAddr &ipIn) { memcpy(ip, ipIn.ip, sizeof(ip)); } void CNetAddr::SetRaw(Network network, const uint8_t *ip_in) { switch (network) { case NET_IPV4: memcpy(ip, pchIPv4, 12); memcpy(ip + 12, ip_in, 4); break; case NET_IPV6: memcpy(ip, ip_in, 16); break; default: assert(!"invalid network"); } } /** * Try to make this a dummy address that maps the specified name into IPv6 like * so: (0xFD + %sha256("bitcoin")[0:5]) + %sha256(name)[0:10]. Such dummy * addresses have a prefix of fd6b:88c0:8724::/48 and are guaranteed to not be * publicly routable as it falls under RFC4193's fc00::/7 subnet allocated to * unique-local addresses. * * CAddrMan uses these fake addresses to keep track of which DNS seeds were * used. * * @returns Whether or not the operation was successful. * * @see CNetAddr::IsInternal(), CNetAddr::IsRFC4193() */ bool CNetAddr::SetInternal(const std::string &name) { if (name.empty()) { return false; } uint8_t hash[32] = {}; CSHA256().Write((const uint8_t *)name.data(), name.size()).Finalize(hash); memcpy(ip, g_internal_prefix, sizeof(g_internal_prefix)); memcpy(ip + sizeof(g_internal_prefix), hash, sizeof(ip) - sizeof(g_internal_prefix)); return true; } /** * Try to make this a dummy address that maps the specified onion address into * IPv6 using OnionCat's range and encoding. Such dummy addresses have a prefix * of fd87:d87e:eb43::/48 and are guaranteed to not be publicly routable as they * fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses. * * @returns Whether or not the operation was successful. * * @see CNetAddr::IsTor(), CNetAddr::IsRFC4193() */ bool CNetAddr::SetSpecial(const std::string &strName) { if (strName.size() > 6 && strName.substr(strName.size() - 6, 6) == ".onion") { std::vector vchAddr = DecodeBase32(strName.substr(0, strName.size() - 6).c_str()); if (vchAddr.size() != 16 - sizeof(pchOnionCat)) { return false; } memcpy(ip, pchOnionCat, sizeof(pchOnionCat)); for (unsigned int i = 0; i < 16 - sizeof(pchOnionCat); i++) { ip[i + sizeof(pchOnionCat)] = vchAddr[i]; } return true; } return false; } CNetAddr::CNetAddr(const struct in_addr &ipv4Addr) { SetRaw(NET_IPV4, (const uint8_t *)&ipv4Addr); } CNetAddr::CNetAddr(const struct in6_addr &ipv6Addr, const uint32_t scope) { SetRaw(NET_IPV6, (const uint8_t *)&ipv6Addr); scopeId = scope; } unsigned int CNetAddr::GetByte(int n) const { return ip[15 - n]; } bool CNetAddr::IsBindAny() const { const int cmplen = IsIPv4() ? 
4 : 16; for (int i = 0; i < cmplen; ++i) { if (GetByte(i)) { return false; } } return true; } bool CNetAddr::IsIPv4() const { return (memcmp(ip, pchIPv4, sizeof(pchIPv4)) == 0); } bool CNetAddr::IsIPv6() const { return !IsIPv4() && !IsTor() && !IsInternal(); } bool CNetAddr::IsRFC1918() const { return IsIPv4() && (GetByte(3) == 10 || (GetByte(3) == 192 && GetByte(2) == 168) || (GetByte(3) == 172 && (GetByte(2) >= 16 && GetByte(2) <= 31))); } bool CNetAddr::IsRFC2544() const { return IsIPv4() && GetByte(3) == 198 && (GetByte(2) == 18 || GetByte(2) == 19); } bool CNetAddr::IsRFC3927() const { return IsIPv4() && (GetByte(3) == 169 && GetByte(2) == 254); } bool CNetAddr::IsRFC6598() const { return IsIPv4() && GetByte(3) == 100 && GetByte(2) >= 64 && GetByte(2) <= 127; } bool CNetAddr::IsRFC5737() const { return IsIPv4() && ((GetByte(3) == 192 && GetByte(2) == 0 && GetByte(1) == 2) || (GetByte(3) == 198 && GetByte(2) == 51 && GetByte(1) == 100) || (GetByte(3) == 203 && GetByte(2) == 0 && GetByte(1) == 113)); } bool CNetAddr::IsRFC3849() const { return GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x0D && GetByte(12) == 0xB8; } bool CNetAddr::IsRFC3964() const { return (GetByte(15) == 0x20 && GetByte(14) == 0x02); } bool CNetAddr::IsRFC6052() const { static const uint8_t pchRFC6052[] = {0, 0x64, 0xFF, 0x9B, 0, 0, 0, 0, 0, 0, 0, 0}; return (memcmp(ip, pchRFC6052, sizeof(pchRFC6052)) == 0); } bool CNetAddr::IsRFC4380() const { return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0 && GetByte(12) == 0); } bool CNetAddr::IsRFC4862() const { static const uint8_t pchRFC4862[] = {0xFE, 0x80, 0, 0, 0, 0, 0, 0}; return (memcmp(ip, pchRFC4862, sizeof(pchRFC4862)) == 0); } bool CNetAddr::IsRFC4193() const { return ((GetByte(15) & 0xFE) == 0xFC); } bool CNetAddr::IsRFC6145() const { static const uint8_t pchRFC6145[] = {0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0, 0}; return (memcmp(ip, pchRFC6145, sizeof(pchRFC6145)) == 0); } bool CNetAddr::IsRFC4843() const { return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x10); } bool CNetAddr::IsRFC7343() const { return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x00 && (GetByte(12) & 0xF0) == 0x20); } bool CNetAddr::IsHeNet() const { return (GetByte(15) == 0x20 && GetByte(14) == 0x01 && GetByte(13) == 0x04 && GetByte(12) == 0x70); } /** * @returns Whether or not this is a dummy address that maps an onion address * into IPv6. * * @see CNetAddr::SetSpecial(const std::string &) */ bool CNetAddr::IsTor() const { return (memcmp(ip, pchOnionCat, sizeof(pchOnionCat)) == 0); } bool CNetAddr::IsLocal() const { // IPv4 loopback (127.0.0.0/8 or 0.0.0.0/8) if (IsIPv4() && (GetByte(3) == 127 || GetByte(3) == 0)) { return true; } // IPv6 loopback (::1/128) static const uint8_t pchLocal[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; if (memcmp(ip, pchLocal, 16) == 0) { return true; } return false; } /** * @returns Whether or not this network address is a valid address that @a could * be used to refer to an actual host. * * @note A valid address may or may not be publicly routable on the global * internet. As in, the set of valid addresses is a superset of the set of * publicly routable addresses. * * @see CNetAddr::IsRoutable() */ bool CNetAddr::IsValid() const { // Cleanup 3-byte shifted addresses caused by garbage in size field of addr // messages from versions before 0.2.9 checksum. 
// Two consecutive addr messages look like this: // header20 vectorlen3 addr26 addr26 addr26 header20 vectorlen3 addr26 // addr26 addr26... so if the first length field is garbled, it reads the // second batch of addr misaligned by 3 bytes. if (memcmp(ip, pchIPv4 + 3, sizeof(pchIPv4) - 3) == 0) { return false; } // unspecified IPv6 address (::/128) uint8_t ipNone6[16] = {}; if (memcmp(ip, ipNone6, 16) == 0) { return false; } // documentation IPv6 address if (IsRFC3849()) { return false; } if (IsInternal()) { return false; } if (IsIPv4()) { // INADDR_NONE uint32_t ipNone = INADDR_NONE; if (memcmp(ip + 12, &ipNone, 4) == 0) { return false; } // 0 ipNone = 0; if (memcmp(ip + 12, &ipNone, 4) == 0) { return false; } } return true; } /** * @returns Whether or not this network address is publicly routable on the * global internet. * * @note A routable address is always valid. As in, the set of routable * addresses is a subset of the set of valid addresses. * * @see CNetAddr::IsValid() */ bool CNetAddr::IsRoutable() const { return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal()); } /** * @returns Whether or not this is a dummy address that maps a name into IPv6. * * @see CNetAddr::SetInternal(const std::string &) */ bool CNetAddr::IsInternal() const { return memcmp(ip, g_internal_prefix, sizeof(g_internal_prefix)) == 0; } enum Network CNetAddr::GetNetwork() const { if (IsInternal()) { return NET_INTERNAL; } if (!IsRoutable()) { return NET_UNROUTABLE; } if (IsIPv4()) { return NET_IPV4; } if (IsTor()) { return NET_ONION; } return NET_IPV6; } std::string CNetAddr::ToStringIP() const { if (IsTor()) { return EncodeBase32(&ip[6], 10) + ".onion"; } if (IsInternal()) { return EncodeBase32(ip + sizeof(g_internal_prefix), sizeof(ip) - sizeof(g_internal_prefix)) + ".internal"; } CService serv(*this, 0); struct sockaddr_storage sockaddr; socklen_t socklen = sizeof(sockaddr); if (serv.GetSockAddr((struct sockaddr *)&sockaddr, &socklen)) { char name[1025] = ""; if (!getnameinfo((const struct sockaddr *)&sockaddr, socklen, name, sizeof(name), nullptr, 0, NI_NUMERICHOST)) { return std::string(name); } } if (IsIPv4()) { return strprintf("%u.%u.%u.%u", GetByte(3), GetByte(2), GetByte(1), GetByte(0)); } return strprintf("%x:%x:%x:%x:%x:%x:%x:%x", GetByte(15) << 8 | GetByte(14), GetByte(13) << 8 | GetByte(12), GetByte(11) << 8 | GetByte(10), GetByte(9) << 8 | GetByte(8), GetByte(7) << 8 | GetByte(6), GetByte(5) << 8 | GetByte(4), GetByte(3) << 8 | GetByte(2), GetByte(1) << 8 | GetByte(0)); } std::string CNetAddr::ToString() const { return ToStringIP(); } bool operator==(const CNetAddr &a, const CNetAddr &b) { return (memcmp(a.ip, b.ip, 16) == 0); } bool operator<(const CNetAddr &a, const CNetAddr &b) { return (memcmp(a.ip, b.ip, 16) < 0); } /** * Try to get our IPv4 address. * * @param[out] pipv4Addr The in_addr struct to which to copy. * * @returns Whether or not the operation was successful, in particular, whether * or not our address was an IPv4 address. * * @see CNetAddr::IsIPv4() */ bool CNetAddr::GetInAddr(struct in_addr *pipv4Addr) const { if (!IsIPv4()) { return false; } memcpy(pipv4Addr, ip + 12, 4); return true; } /** * Try to get our IPv6 address. * * @param[out] pipv6Addr The in6_addr struct to which to copy. * * @returns Whether or not the operation was successful, in particular, whether * or not our address was an IPv6 address. 
* * @see CNetAddr::IsIPv6() */ bool CNetAddr::GetIn6Addr(struct in6_addr *pipv6Addr) const { if (!IsIPv6()) { return false; } memcpy(pipv6Addr, ip, 16); return true; } bool CNetAddr::HasLinkedIPv4() const { return IsRoutable() && (IsIPv4() || IsRFC6145() || IsRFC6052() || IsRFC3964() || IsRFC4380()); } uint32_t CNetAddr::GetLinkedIPv4() const { if (IsIPv4() || IsRFC6145() || IsRFC6052()) { // IPv4, mapped IPv4, SIIT translated IPv4: the IPv4 address is the last // 4 bytes of the address return ReadBE32(ip + 12); } else if (IsRFC3964()) { // 6to4 tunneled IPv4: the IPv4 address is in bytes 2-6 return ReadBE32(ip + 2); } else if (IsRFC4380()) { // Teredo tunneled IPv4: the IPv4 address is in the last 4 bytes of the // address, but bitflipped return ~ReadBE32(ip + 12); } assert(false); } uint32_t CNetAddr::GetNetClass() const { uint32_t net_class = NET_IPV6; if (IsLocal()) { net_class = 255; } if (IsInternal()) { net_class = NET_INTERNAL; } else if (!IsRoutable()) { net_class = NET_UNROUTABLE; } else if (HasLinkedIPv4()) { net_class = NET_IPV4; } else if (IsTor()) { net_class = NET_ONION; } return net_class; } uint32_t CNetAddr::GetMappedAS(const std::vector &asmap) const { uint32_t net_class = GetNetClass(); if (asmap.size() == 0 || (net_class != NET_IPV4 && net_class != NET_IPV6)) { return 0; // Indicates not found, safe because AS0 is reserved per // RFC7607. } std::vector ip_bits(128); if (HasLinkedIPv4()) { // For lookup, treat as if it was just an IPv4 address (pchIPv4 prefix + // IPv4 bits) for (int8_t byte_i = 0; byte_i < 12; ++byte_i) { for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { ip_bits[byte_i * 8 + bit_i] = (pchIPv4[byte_i] >> (7 - bit_i)) & 1; } } uint32_t ipv4 = GetLinkedIPv4(); for (int i = 0; i < 32; ++i) { ip_bits[96 + i] = (ipv4 >> (31 - i)) & 1; } } else { // Use all 128 bits of the IPv6 address otherwise for (int8_t byte_i = 0; byte_i < 16; ++byte_i) { uint8_t cur_byte = GetByte(15 - byte_i); for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1; } } } uint32_t mapped_as = Interpret(asmap, ip_bits); return mapped_as; } /** * Get the canonical identifier of our network group * * The groups are assigned in a way where it should be costly for an attacker to * obtain addresses with many different group identifiers, even if it is cheap * to obtain addresses with the same identifier. * * @note No two connections will be attempted to addresses with the same network * group. */ std::vector CNetAddr::GetGroup(const std::vector &asmap) const { std::vector vchRet; uint32_t net_class = GetNetClass(); // If non-empty asmap is supplied and the address is IPv4/IPv6, // return ASN to be used for bucketing. uint32_t asn = GetMappedAS(asmap); if (asn != 0) { // Either asmap was empty, or address has non-asmappable net // class (e.g. TOR). 
vchRet.push_back(NET_IPV6); // IPv4 and IPv6 with same ASN should be in // the same bucket for (int i = 0; i < 4; i++) { vchRet.push_back((asn >> (8 * i)) & 0xFF); } return vchRet; } vchRet.push_back(net_class); int nStartByte = 0; int nBits = 16; if (IsLocal()) { // all local addresses belong to the same group nBits = 0; } else if (IsInternal()) { // all internal-usage addresses get their own group nStartByte = sizeof(g_internal_prefix); nBits = (sizeof(ip) - sizeof(g_internal_prefix)) * 8; } else if (!IsRoutable()) { // all other unroutable addresses belong to the same group nBits = 0; } else if (HasLinkedIPv4()) { // IPv4 addresses (and mapped IPv4 addresses) use /16 groups uint32_t ipv4 = GetLinkedIPv4(); vchRet.push_back((ipv4 >> 24) & 0xFF); vchRet.push_back((ipv4 >> 16) & 0xFF); return vchRet; } else if (IsTor()) { nStartByte = 6; nBits = 4; } else if (IsHeNet()) { // for he.net, use /36 groups nBits = 36; } else { // for the rest of the IPv6 network, use /32 groups nBits = 32; } // push our ip onto vchRet byte by byte... while (nBits >= 8) { vchRet.push_back(GetByte(15 - nStartByte)); nStartByte++; nBits -= 8; } // ...for the last byte, push nBits and for the rest of the byte push 1's if (nBits > 0) { vchRet.push_back(GetByte(15 - nStartByte) | ((1 << (8 - nBits)) - 1)); } return vchRet; } uint64_t CNetAddr::GetHash() const { - uint256 hash = Hash(&ip[0], &ip[16]); + uint256 hash = Hash(ip); uint64_t nRet; memcpy(&nRet, &hash, sizeof(nRet)); return nRet; } // private extensions to enum Network, only returned by GetExtNetwork, and only // used in GetReachabilityFrom static const int NET_UNKNOWN = NET_MAX + 0; static const int NET_TEREDO = NET_MAX + 1; static int GetExtNetwork(const CNetAddr *addr) { if (addr == nullptr) { return NET_UNKNOWN; } if (addr->IsRFC4380()) { return NET_TEREDO; } return addr->GetNetwork(); } /** Calculates a metric for how reachable (*this) is from a given partner */ int CNetAddr::GetReachabilityFrom(const CNetAddr *paddrPartner) const { enum Reachability { REACH_UNREACHABLE, REACH_DEFAULT, REACH_TEREDO, REACH_IPV6_WEAK, REACH_IPV4, REACH_IPV6_STRONG, REACH_PRIVATE }; if (!IsRoutable() || IsInternal()) { return REACH_UNREACHABLE; } int ourNet = GetExtNetwork(this); int theirNet = GetExtNetwork(paddrPartner); bool fTunnel = IsRFC3964() || IsRFC6052() || IsRFC6145(); switch (theirNet) { case NET_IPV4: switch (ourNet) { default: return REACH_DEFAULT; case NET_IPV4: return REACH_IPV4; } case NET_IPV6: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV4: return REACH_IPV4; // only prefer giving our IPv6 address if it's not tunnelled case NET_IPV6: return fTunnel ? 
REACH_IPV6_WEAK : REACH_IPV6_STRONG; } case NET_ONION: switch (ourNet) { default: return REACH_DEFAULT; // Tor users can connect to IPv4 as well case NET_IPV4: return REACH_IPV4; case NET_ONION: return REACH_PRIVATE; } case NET_TEREDO: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV6: return REACH_IPV6_WEAK; case NET_IPV4: return REACH_IPV4; } case NET_UNKNOWN: case NET_UNROUTABLE: default: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV6: return REACH_IPV6_WEAK; case NET_IPV4: return REACH_IPV4; // either from Tor, or don't care about our address case NET_ONION: return REACH_PRIVATE; } } } CService::CService() : port(0) {} CService::CService(const CNetAddr &cip, unsigned short portIn) : CNetAddr(cip), port(portIn) {} CService::CService(const struct in_addr &ipv4Addr, unsigned short portIn) : CNetAddr(ipv4Addr), port(portIn) {} CService::CService(const struct in6_addr &ipv6Addr, unsigned short portIn) : CNetAddr(ipv6Addr), port(portIn) {} CService::CService(const struct sockaddr_in &addr) : CNetAddr(addr.sin_addr), port(ntohs(addr.sin_port)) { assert(addr.sin_family == AF_INET); } CService::CService(const struct sockaddr_in6 &addr) : CNetAddr(addr.sin6_addr, addr.sin6_scope_id), port(ntohs(addr.sin6_port)) { assert(addr.sin6_family == AF_INET6); } bool CService::SetSockAddr(const struct sockaddr *paddr) { switch (paddr->sa_family) { case AF_INET: *this = CService(*reinterpret_cast(paddr)); return true; case AF_INET6: *this = CService(*reinterpret_cast(paddr)); return true; default: return false; } } unsigned short CService::GetPort() const { return port; } bool operator==(const CService &a, const CService &b) { return static_cast(a) == static_cast(b) && a.port == b.port; } bool operator<(const CService &a, const CService &b) { return static_cast(a) < static_cast(b) || (static_cast(a) == static_cast(b) && a.port < b.port); } /** * Obtain the IPv4/6 socket address this represents. * * @param[out] paddr The obtained socket address. * @param[in,out] addrlen The size, in bytes, of the address structure pointed * to by paddr. The value that's pointed to by this * parameter might change after calling this function if * the size of the corresponding address structure * changed. * * @returns Whether or not the operation was successful. */ bool CService::GetSockAddr(struct sockaddr *paddr, socklen_t *addrlen) const { if (IsIPv4()) { if (*addrlen < (socklen_t)sizeof(struct sockaddr_in)) { return false; } *addrlen = sizeof(struct sockaddr_in); struct sockaddr_in *paddrin = reinterpret_cast(paddr); memset(paddrin, 0, *addrlen); if (!GetInAddr(&paddrin->sin_addr)) { return false; } paddrin->sin_family = AF_INET; paddrin->sin_port = htons(port); return true; } if (IsIPv6()) { if (*addrlen < (socklen_t)sizeof(struct sockaddr_in6)) { return false; } *addrlen = sizeof(struct sockaddr_in6); struct sockaddr_in6 *paddrin6 = reinterpret_cast(paddr); memset(paddrin6, 0, *addrlen); if (!GetIn6Addr(&paddrin6->sin6_addr)) { return false; } paddrin6->sin6_scope_id = scopeId; paddrin6->sin6_family = AF_INET6; paddrin6->sin6_port = htons(port); return true; } return false; } /** * @returns An identifier unique to this service's address and port number. 
*/ std::vector CService::GetKey() const { std::vector vKey; vKey.resize(18); memcpy(vKey.data(), ip, 16); // most significant byte of our port vKey[16] = port / 0x100; // least significant byte of our port vKey[17] = port & 0x0FF; return vKey; } std::string CService::ToStringPort() const { return strprintf("%u", port); } std::string CService::ToStringIPPort() const { if (IsIPv4() || IsTor() || IsInternal()) { return ToStringIP() + ":" + ToStringPort(); } else { return "[" + ToStringIP() + "]:" + ToStringPort(); } } std::string CService::ToString() const { return ToStringIPPort(); } CSubNet::CSubNet() : valid(false) { memset(netmask, 0, sizeof(netmask)); } CSubNet::CSubNet(const CNetAddr &addr, int32_t mask) { valid = true; network = addr; // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address memset(netmask, 255, sizeof(netmask)); // IPv4 addresses start at offset 12, and first 12 bytes must match, so just // offset n const int astartofs = network.IsIPv4() ? 12 : 0; // Only valid if in range of bits of address int32_t n = mask; if (n >= 0 && n <= (128 - astartofs * 8)) { n += astartofs * 8; // Clear bits [n..127] for (; n < 128; ++n) { netmask[n >> 3] &= ~(1 << (7 - (n & 7))); } } else { valid = false; } // Normalize network according to netmask for (int x = 0; x < 16; ++x) { network.ip[x] &= netmask[x]; } } CSubNet::CSubNet(const CNetAddr &addr, const CNetAddr &mask) { valid = true; network = addr; // Default to /32 (IPv4) or /128 (IPv6), i.e. match single address memset(netmask, 255, sizeof(netmask)); // IPv4 addresses start at offset 12, and first 12 bytes must match, so just // offset n const int astartofs = network.IsIPv4() ? 12 : 0; for (int x = astartofs; x < 16; ++x) { netmask[x] = mask.ip[x]; } // Normalize network according to netmask for (int x = 0; x < 16; ++x) { network.ip[x] &= netmask[x]; } } CSubNet::CSubNet(const CNetAddr &addr) : valid(addr.IsValid()) { memset(netmask, 255, sizeof(netmask)); network = addr; } /** * @returns True if this subnet is valid, the specified address is valid, and * the specified address belongs in this subnet. */ bool CSubNet::Match(const CNetAddr &addr) const { if (!valid || !addr.IsValid()) { return false; } for (int x = 0; x < 16; ++x) { if ((addr.ip[x] & netmask[x]) != network.ip[x]) { return false; } } return true; } /** * @returns The number of 1-bits in the prefix of the specified subnet mask. If * the specified subnet mask is not a valid one, -1. */ static inline int NetmaskBits(uint8_t x) { switch (x) { case 0x00: return 0; case 0x80: return 1; case 0xc0: return 2; case 0xe0: return 3; case 0xf0: return 4; case 0xf8: return 5; case 0xfc: return 6; case 0xfe: return 7; case 0xff: return 8; default: return -1; } } std::string CSubNet::ToString() const { /* Parse binary 1{n}0{N-n} to see if mask can be represented as /n */ int cidr = 0; bool valid_cidr = true; int n = network.IsIPv4() ? 
12 : 0; for (; n < 16 && netmask[n] == 0xff; ++n) { cidr += 8; } if (n < 16) { int bits = NetmaskBits(netmask[n]); if (bits < 0) { valid_cidr = false; } else { cidr += bits; } ++n; } for (; n < 16 && valid_cidr; ++n) { if (netmask[n] != 0x00) { valid_cidr = false; } } /* Format output */ std::string strNetmask; if (valid_cidr) { strNetmask = strprintf("%u", cidr); } else { if (network.IsIPv4()) { strNetmask = strprintf("%u.%u.%u.%u", netmask[12], netmask[13], netmask[14], netmask[15]); } else { strNetmask = strprintf( "%x:%x:%x:%x:%x:%x:%x:%x", netmask[0] << 8 | netmask[1], netmask[2] << 8 | netmask[3], netmask[4] << 8 | netmask[5], netmask[6] << 8 | netmask[7], netmask[8] << 8 | netmask[9], netmask[10] << 8 | netmask[11], netmask[12] << 8 | netmask[13], netmask[14] << 8 | netmask[15]); } } return network.ToString() + "/" + strNetmask; } bool CSubNet::IsValid() const { return valid; } bool operator==(const CSubNet &a, const CSubNet &b) { return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16); } bool operator<(const CSubNet &a, const CSubNet &b) { return (a.network < b.network || (a.network == b.network && memcmp(a.netmask, b.netmask, 16) < 0)); } bool SanityCheckASMap(const std::vector &asmap) { // For IP address lookups, the input is 128 bits return SanityCheckASMap(asmap, 128); } diff --git a/src/pubkey.h b/src/pubkey.h index 6cb50030e..30bc66301 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -1,231 +1,233 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017 The Zcash developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_PUBKEY_H #define BITCOIN_PUBKEY_H #include #include #include #include #include #include const unsigned int BIP32_EXTKEY_SIZE = 74; /** A reference to a CKey: the Hash160 of its serialized public key */ class CKeyID : public uint160 { public: CKeyID() : uint160() {} explicit CKeyID(const uint160 &in) : uint160(in) {} }; using ChainCode = uint256; /** An encapsulated public key. */ class CPubKey { public: /** * secp256k1: */ static constexpr unsigned int SIZE = 65; static constexpr unsigned int COMPRESSED_SIZE = 33; static constexpr unsigned int SCHNORR_SIZE = 64; static constexpr unsigned int SIGNATURE_SIZE = 72; static constexpr unsigned int COMPACT_SIGNATURE_SIZE = 65; /** * see www.keylength.com * script supports up to 75 for single byte push */ static_assert(SIZE >= COMPRESSED_SIZE, "COMPRESSED_SIZE is larger than SIZE"); private: /** * Just store the serialized data. * Its length can very cheaply be computed from the first byte. */ uint8_t vch[SIZE]; //! Compute the length of a pubkey with a given first byte. static unsigned int GetLen(uint8_t chHeader) { if (chHeader == 2 || chHeader == 3) { return COMPRESSED_SIZE; } if (chHeader == 4 || chHeader == 6 || chHeader == 7) { return SIZE; } return 0; } //! Set this key data to be invalid void Invalidate() { vch[0] = 0xFF; } public: bool static ValidSize(const std::vector &vch) { return vch.size() > 0 && GetLen(vch[0]) == vch.size(); } //! Construct an invalid public key. CPubKey() { Invalidate(); } //! Initialize a public key using begin/end iterators to byte data. template void Set(const T pbegin, const T pend) { int len = pend == pbegin ? 0 : GetLen(pbegin[0]); if (len && len == (pend - pbegin)) { memcpy(vch, (uint8_t *)&pbegin[0], len); } else { Invalidate(); } } //! 
Construct a public key using begin/end iterators to byte data. template CPubKey(const T pbegin, const T pend) { Set(pbegin, pend); } //! Construct a public key from a byte vector. explicit CPubKey(const std::vector &_vch) { Set(_vch.begin(), _vch.end()); } //! Simple read-only vector-like interface to the pubkey data. unsigned int size() const { return GetLen(vch[0]); } const uint8_t *data() const { return vch; } const uint8_t *begin() const { return vch; } const uint8_t *end() const { return vch + size(); } const uint8_t &operator[](unsigned int pos) const { return vch[pos]; } //! Comparator implementation. friend bool operator==(const CPubKey &a, const CPubKey &b) { return a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) == 0; } friend bool operator!=(const CPubKey &a, const CPubKey &b) { return !(a == b); } friend bool operator<(const CPubKey &a, const CPubKey &b) { return a.vch[0] < b.vch[0] || (a.vch[0] == b.vch[0] && memcmp(a.vch, b.vch, a.size()) < 0); } //! Implement serialization, as if this was a byte vector. template void Serialize(Stream &s) const { unsigned int len = size(); ::WriteCompactSize(s, len); s.write((char *)vch, len); } template void Unserialize(Stream &s) { unsigned int len = ::ReadCompactSize(s); if (len <= SIZE) { s.read((char *)vch, len); } else { // invalid pubkey, skip available data char dummy; while (len--) { s.read(&dummy, 1); } Invalidate(); } } //! Get the KeyID of this public key (hash of its serialization) - CKeyID GetID() const { return CKeyID(Hash160(vch, vch + size())); } + CKeyID GetID() const { + return CKeyID(Hash160(MakeSpan(vch).first(size()))); + } //! Get the 256-bit hash of this public key. - uint256 GetHash() const { return Hash(vch, vch + size()); } + uint256 GetHash() const { return Hash(MakeSpan(vch).first(size())); } /* * Check syntactic correctness. * * Note that this is consensus critical as CheckSig() calls it! */ bool IsValid() const { return size() > 0; } //! fully validate whether this is a valid public key (more expensive than //! IsValid()) bool IsFullyValid() const; //! Check whether this is a compressed public key. bool IsCompressed() const { return size() == COMPRESSED_SIZE; } /** * Verify a DER-serialized ECDSA signature (~72 bytes). * If this public key is not fully valid, the return value will be false. */ bool VerifyECDSA(const uint256 &hash, const std::vector &vchSig) const; /** * Verify a Schnorr signature (=64 bytes). * If this public key is not fully valid, the return value will be false. */ bool VerifySchnorr(const uint256 &hash, const std::array &sig) const; bool VerifySchnorr(const uint256 &hash, const std::vector &vchSig) const; /** * Check whether a DER-serialized ECDSA signature is normalized (lower-S). */ static bool CheckLowS(const boost::sliced_range> &vchSig); static bool CheckLowS(const std::vector &vchSig) { return CheckLowS(vchSig | boost::adaptors::sliced(0, vchSig.size())); } //! Recover a public key from a compact ECDSA signature. bool RecoverCompact(const uint256 &hash, const std::vector &vchSig); //! Turn this public key into an uncompressed public key. bool Decompress(); //! Derive BIP32 child pubkey. 
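//! Per BIP32, only non-hardened derivation (nChild < 0x80000000) is
//! possible from a public key, and the parent pubkey must be compressed;
//! hardened children can only be derived from the corresponding private key.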
bool Derive(CPubKey &pubkeyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode &cc) const; }; struct CExtPubKey { uint8_t nDepth; uint8_t vchFingerprint[4]; unsigned int nChild; ChainCode chaincode; CPubKey pubkey; friend bool operator==(const CExtPubKey &a, const CExtPubKey &b) { return a.nDepth == b.nDepth && memcmp(&a.vchFingerprint[0], &b.vchFingerprint[0], sizeof(vchFingerprint)) == 0 && a.nChild == b.nChild && a.chaincode == b.chaincode && a.pubkey == b.pubkey; } friend bool operator!=(const CExtPubKey &a, const CExtPubKey &b) { return !(a == b); } void Encode(uint8_t code[BIP32_EXTKEY_SIZE]) const; void Decode(const uint8_t code[BIP32_EXTKEY_SIZE]); bool Derive(CExtPubKey &out, unsigned int nChild) const; CExtPubKey() = default; }; /** * Users of this module must hold an ECCVerifyHandle. The constructor and * destructor of these are not allowed to run in parallel, though. */ class ECCVerifyHandle { static int refcount; public: ECCVerifyHandle(); ~ECCVerifyHandle(); }; #endif // BITCOIN_PUBKEY_H diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 34b47b0da..ac949f73c 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -1,282 +1,280 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include
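/*
 * A minimal sketch of the Span-based hashing pattern used in the changes
 * above, assuming the Hash()/Hash160() overloads from hash.h and the
 * MakeSpan() helper from span.h:
 *
 *   std::vector<uint8_t> data{0xde, 0xad, 0xbe, 0xef};
 *
 *   // Previously an explicit iterator pair was passed:
 *   //   uint256 h = Hash(data.begin(), data.end());
 *   // Any object convertible to a Span of bytes can now be hashed directly:
 *   uint256 h = Hash(data);
 *
 *   // Hashing only a prefix, as in CPubKey::GetID()/GetHash():
 *   uint256 hp = Hash(MakeSpan(data).first(2));
 *   uint160 id = Hash160(MakeSpan(data).first(2));
 */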