diff --git a/src/rest.cpp b/src/rest.cpp index 292679bdc0..6dabc6acc3 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -1,705 +1,677 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <chain.h> #include <chainparams.h> #include <config.h> #include <core_io.h> #include <httpserver.h> #include <index/txindex.h> #include <primitives/block.h> #include <primitives/transaction.h> #include <rpc/blockchain.h> #include <rpc/server.h> #include <streams.h> #include <sync.h> #include <txmempool.h> #include <utilstrencodings.h> #include <validation.h> #include <version.h> #include <boost/algorithm/string.hpp> #include <univalue.h> // Allow a max of 15 outpoints to be queried at once. static const size_t MAX_GETUTXOS_OUTPOINTS = 15; enum class RetFormat { UNDEF, BINARY, HEX, JSON, }; static const struct { enum RetFormat rf; const char *name; } rf_names[] = { {RetFormat::UNDEF, ""}, {RetFormat::BINARY, "bin"}, {RetFormat::HEX, "hex"}, {RetFormat::JSON, "json"}, }; struct CCoin { uint32_t nHeight; CTxOut out; CCoin() : nHeight(0) {} explicit CCoin(Coin in) : nHeight(in.GetHeight()), out(std::move(in.GetTxOut())) {} ADD_SERIALIZE_METHODS; template <typename Stream, typename Operation> inline void SerializationOp(Stream &s, Operation ser_action) { uint32_t nTxVerDummy = 0; READWRITE(nTxVerDummy); READWRITE(nHeight); READWRITE(out); } }; static bool RESTERR(HTTPRequest *req, enum HTTPStatusCode status, std::string message) { req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(status, message + "\r\n"); return false; } static enum RetFormat ParseDataFormat(std::string &param, const std::string &strReq) { const std::string::size_type pos = strReq.rfind('.'); if (pos == std::string::npos) { param = strReq; return rf_names[0].rf; } param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (suff == rf_names[i].name) { return rf_names[i].rf; } } /* If no suffix is found, return original string. */ param = strReq; return rf_names[0].rf; } static std::string AvailableDataFormatsString() { std::string formats; for (size_t i = 0; i < ARRAYLEN(rf_names); i++) { if (strlen(rf_names[i].name) > 0) { formats.append("."); formats.append(rf_names[i].name); formats.append(", "); } } if (formats.length() > 0) { return formats.substr(0, formats.length() - 2); } return formats; } static bool ParseHashStr(const std::string &strReq, uint256 &v) { if (!IsHex(strReq) || (strReq.size() != 64)) { return false; } v.SetHex(strReq); return true; } static bool CheckWarmup(HTTPRequest *req) { std::string statusmessage; if (RPCIsInWarmup(&statusmessage)) { return RESTERR(req, HTTP_SERVICE_UNAVAILABLE, "Service temporarily unavailable: " + statusmessage); } return true; } static bool rest_headers(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector<std::string> path; boost::split(path, param, boost::is_any_of("/")); if (path.size() != 2) { return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. 
Use " "/rest/headers/<count>/<hash>.<ext>."); } long count = strtol(path[0].c_str(), nullptr, 10); if (count < 1 || count > 2000) { return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]); } std::string hashStr = path[1]; uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const CBlockIndex *tip = nullptr; std::vector<const CBlockIndex *> headers; headers.reserve(count); { LOCK(cs_main); tip = chainActive.Tip(); const CBlockIndex *pindex = LookupBlockIndex(hash); while (pindex != nullptr && chainActive.Contains(pindex)) { headers.push_back(pindex); if (headers.size() == size_t(count)) { break; } pindex = chainActive.Next(pindex); } } CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION); for (const CBlockIndex *pindex : headers) { ssHeader << pindex->GetBlockHeader(); } switch (rf) { case RetFormat::BINARY: { std::string binaryHeader = ssHeader.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryHeader); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssHeader.begin(), ssHeader.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); for (const CBlockIndex *pindex : headers) { jsonHeaders.push_back(blockheaderToJSON(tip, pindex)); } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: .bin, .hex)"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static bool rest_block(const Config &config, HTTPRequest *req, const std::string &strURIPart, bool showTxDetails) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } CBlock block; CBlockIndex *pblockindex = nullptr; CBlockIndex *tip = nullptr; { LOCK(cs_main); tip = chainActive.Tip(); pblockindex = LookupBlockIndex(hash); if (!pblockindex) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } if (fHavePruned && !pblockindex->nStatus.hasData() && pblockindex->nTx > 0) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } } CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; switch (rf) { case RetFormat::BINARY: { std::string binaryBlock = ssBlock.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryBlock); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objBlock = blockToJSON(block, tip, pblockindex, showTxDetails); std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } - - // not reached - // continue to process further 
HTTP reqs on this cxn - return true; } static bool rest_block_extended(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, true); } static bool rest_block_notxdetails(Config &config, HTTPRequest *req, const std::string &strURIPart) { return rest_block(config, req, strURIPart, false); } static bool rest_chaininfo(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { JSONRPCRequest jsonRequest; jsonRequest.params = UniValue(UniValue::VARR); UniValue chainInfoObject = getblockchaininfo(config, jsonRequest); std::string strJSON = chainInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static bool rest_mempool_info(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { UniValue mempoolInfoObject = mempoolInfoToJSON(); std::string strJSON = mempoolInfoObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static bool rest_mempool_contents(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); switch (rf) { case RetFormat::JSON: { UniValue mempoolObject = mempoolToJSON(true); std::string strJSON = mempoolObject.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: json)"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static bool rest_tx(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string hashStr; const RetFormat rf = ParseDataFormat(hashStr, strURIPart); uint256 hash; if (!ParseHashStr(hashStr, hash)) { return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); } const TxId txid(hash); if (g_txindex) { g_txindex->BlockUntilSyncedToCurrentChain(); } CTransactionRef tx; uint256 hashBlock = uint256(); if (!GetTransaction(config, txid, tx, hashBlock, true)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssTx << tx; switch (rf) { case RetFormat::BINARY: { std::string binaryTx = ssTx.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, binaryTx); return true; } case RetFormat::HEX: { std::string strHex = HexStr(ssTx.begin(), ssTx.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objTx(UniValue::VOBJ); TxToUniv(*tx, hashBlock, objTx); std::string strJSON = objTx.write() + "\n"; 
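// objTx now holds the fully decoded transaction (txid, vin, vout, ...), including "blockhash" when the transaction was found in a block; reply with it as JSON.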
req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static bool rest_getutxos(Config &config, HTTPRequest *req, const std::string &strURIPart) { if (!CheckWarmup(req)) { return false; } std::string param; const RetFormat rf = ParseDataFormat(param, strURIPart); std::vector<std::string> uriParts; if (param.length() > 1) { std::string strUriParams = param.substr(1); boost::split(uriParts, strUriParams, boost::is_any_of("/")); } // throw exception in case of an empty request std::string strRequestMutable = req->ReadBody(); if (strRequestMutable.length() == 0 && uriParts.size() == 0) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } bool fInputParsed = false; bool fCheckMemPool = false; std::vector<COutPoint> vOutPoints; // parse/deserialize input // input-format = output-format, rest/getutxos/bin requires binary input, // gives binary output, ... if (uriParts.size() > 0) { // inputs is sent over URI scheme // (/rest/getutxos/checkmempool/txid1-n/txid2-n/...) if (uriParts[0] == "checkmempool") { fCheckMemPool = true; } for (size_t i = (fCheckMemPool) ? 1 : 0; i < uriParts.size(); i++) { int32_t nOutput; std::string strTxid = uriParts[i].substr(0, uriParts[i].find("-")); std::string strOutput = uriParts[i].substr(uriParts[i].find("-") + 1); if (!ParseInt32(strOutput, &nOutput) || !IsHex(strTxid)) { return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } TxId txid; txid.SetHex(strTxid); vOutPoints.push_back(COutPoint(txid, uint32_t(nOutput))); } if (vOutPoints.size() > 0) { fInputParsed = true; } else { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } } switch (rf) { case RetFormat::HEX: { // convert hex to bin, continue then with bin part std::vector<uint8_t> strRequestV = ParseHex(strRequestMutable); strRequestMutable.assign(strRequestV.begin(), strRequestV.end()); } // FALLTHROUGH case RetFormat::BINARY: { try { // deserialize only if user sent a request if (strRequestMutable.size() > 0) { // don't allow sending input over URI and HTTP RAW DATA if (fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Combination of URI scheme inputs and " "raw post data is not allowed"); } CDataStream oss(SER_NETWORK, PROTOCOL_VERSION); oss << strRequestMutable; oss >> fCheckMemPool; oss >> vOutPoints; } } catch (const std::ios_base::failure &e) { // abort in case of unreadable binary data return RESTERR(req, HTTP_BAD_REQUEST, "Parse error"); } break; } case RetFormat::JSON: { if (!fInputParsed) { return RESTERR(req, HTTP_BAD_REQUEST, "Error: empty request"); } break; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } // limit max outpoints if (vOutPoints.size() > MAX_GETUTXOS_OUTPOINTS) { return RESTERR( req, HTTP_BAD_REQUEST, strprintf("Error: max outpoints exceeded (max: %d, tried: %d)", MAX_GETUTXOS_OUTPOINTS, vOutPoints.size())); } // check spentness and form a bitmap (as well as a JSON capable // human-readable string representation) std::vector<uint8_t> bitmap; std::vector<CCoin> outs; std::string bitmapStringRepresentation; std::vector<bool> hits; bitmap.resize((vOutPoints.size() + 7) / 8); { LOCK2(cs_main, g_mempool.cs); CCoinsView viewDummy; CCoinsViewCache view(&viewDummy); CCoinsViewCache &viewChain = *pcoinsTip; 
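// Layered lookup: view starts on an empty dummy backend and is switched below to viewMempool, which overlays unconfirmed outputs from g_mempool on top of the pcoinsTip UTXO set, when checkmempool was requested.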
CCoinsViewMemPool viewMempool(&viewChain, g_mempool); if (fCheckMemPool) { // switch cache backend to db+mempool in case user likes to query // mempool. view.SetBackend(viewMempool); } for (size_t i = 0; i < vOutPoints.size(); i++) { Coin coin; bool hit = false; if (view.GetCoin(vOutPoints[i], coin) && !g_mempool.isSpent(vOutPoints[i])) { hit = true; outs.emplace_back(std::move(coin)); } hits.push_back(hit); // form a binary string representation (human-readable for json // output) bitmapStringRepresentation.append(hit ? "1" : "0"); bitmap[i / 8] |= ((uint8_t)hit) << (i % 8); } } switch (rf) { case RetFormat::BINARY: { // serialize data // use exact same output as mentioned in Bip64 CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string ssGetUTXOResponseString = ssGetUTXOResponse.str(); req->WriteHeader("Content-Type", "application/octet-stream"); req->WriteReply(HTTP_OK, ssGetUTXOResponseString); return true; } case RetFormat::HEX: { CDataStream ssGetUTXOResponse(SER_NETWORK, PROTOCOL_VERSION); ssGetUTXOResponse << chainActive.Height() << chainActive.Tip()->GetBlockHash() << bitmap << outs; std::string strHex = HexStr(ssGetUTXOResponse.begin(), ssGetUTXOResponse.end()) + "\n"; req->WriteHeader("Content-Type", "text/plain"); req->WriteReply(HTTP_OK, strHex); return true; } case RetFormat::JSON: { UniValue objGetUTXOResponse(UniValue::VOBJ); // pack in some essentials // use more or less the same output as mentioned in Bip64 objGetUTXOResponse.pushKV("chainHeight", chainActive.Height()); objGetUTXOResponse.pushKV( "chaintipHash", chainActive.Tip()->GetBlockHash().GetHex()); objGetUTXOResponse.pushKV("bitmap", bitmapStringRepresentation); UniValue utxos(UniValue::VARR); for (const CCoin &coin : outs) { UniValue utxo(UniValue::VOBJ); utxo.pushKV("height", int32_t(coin.nHeight)); utxo.pushKV("value", ValueFromAmount(coin.out.nValue)); // include the script in a json output UniValue o(UniValue::VOBJ); ScriptPubKeyToUniv(coin.out.scriptPubKey, o, true); utxo.pushKV("scriptPubKey", o); utxos.push_back(utxo); } objGetUTXOResponse.pushKV("utxos", utxos); // return json string std::string strJSON = objGetUTXOResponse.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); return true; } default: { return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); } } - - // not reached - // continue to process further HTTP reqs on this cxn - return true; } static const struct { const char *prefix; bool (*handler)(Config &config, HTTPRequest *req, const std::string &strReq); } uri_prefixes[] = { {"/rest/tx/", rest_tx}, {"/rest/block/notxdetails/", rest_block_notxdetails}, {"/rest/block/", rest_block_extended}, {"/rest/chaininfo", rest_chaininfo}, {"/rest/mempool/info", rest_mempool_info}, {"/rest/mempool/contents", rest_mempool_contents}, {"/rest/headers/", rest_headers}, {"/rest/getutxos", rest_getutxos}, }; bool StartREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { RegisterHTTPHandler(uri_prefixes[i].prefix, false, uri_prefixes[i].handler); } return true; } void InterruptREST() {} void StopREST() { for (size_t i = 0; i < ARRAYLEN(uri_prefixes); i++) { UnregisterHTTPHandler(uri_prefixes[i].prefix, false); } } diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index 6d9353e317..715161f124 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -1,874 +1,872 @@ // Copyright 
(c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <wallet/db.h> #include <addrman.h> #include <fs.h> #include <hash.h> #include <protocol.h> #include <util.h> #include <utilstrencodings.h> #include <wallet/walletutil.h> #include <boost/thread.hpp> // boost::this_thread::interruption_point() (mingw) #include <cstdint> #ifndef WIN32 #include <sys/stat.h> #endif namespace { //! Make sure database has a unique fileid within the environment. If it //! doesn't, throw an error. BDB caches do not work properly when more than one //! open database has the same fileid (values written to one database may show //! up in reads to other databases). //! //! BerkeleyDB generates unique fileids by default //! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html), //! so bitcoin should never create different databases with the same fileid, but //! this error can be triggered if users manually copy database files. void CheckUniqueFileid(const BerkeleyEnvironment &env, const std::string &filename, Db &db) { if (env.IsMock()) { return; } u_int8_t fileid[DB_FILE_ID_LEN]; int ret = db.get_mpf()->get_fileid(fileid); if (ret != 0) { throw std::runtime_error(strprintf( "BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret)); } for (const auto &item : env.mapDb) { u_int8_t item_fileid[DB_FILE_ID_LEN]; if (item.second && item.second->get_mpf()->get_fileid(item_fileid) == 0 && memcmp(fileid, item_fileid, sizeof(fileid)) == 0) { const char *item_filename = nullptr; item.second->get_dbname(&item_filename, nullptr); throw std::runtime_error(strprintf( "BerkeleyBatch: Can't open database %s (duplicates fileid %s " "from %s)", filename, HexStr(std::begin(item_fileid), std::end(item_fileid)), item_filename ? item_filename : "(unknown database)")); } } } CCriticalSection cs_db; //!< Map from directory name to open db environment. std::map<std::string, BerkeleyEnvironment> g_dbenvs; } // namespace BerkeleyEnvironment *GetWalletEnv(const fs::path &wallet_path, std::string &database_filename) { fs::path env_directory; if (fs::is_regular_file(wallet_path)) { // Special case for backwards compatibility: if wallet path points to an // existing file, treat it as the path to a BDB data file in a parent // directory that also contains BDB log files. env_directory = wallet_path.parent_path(); database_filename = wallet_path.filename().string(); } else { // Normal case: Interpret wallet path as a directory path containing // data and log files. env_directory = wallet_path; database_filename = "wallet.dat"; } LOCK(cs_db); // Note: An ununsed temporary BerkeleyEnvironment object may be created // inside the emplace function if the key already exists. This is a little // inefficient, but not a big concern since the map will be changed in the // future to hold pointers instead of objects, anyway. 
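// emplace() leaves an existing entry for this directory untouched and returns an iterator to it, so callers sharing a wallet directory all get the same BerkeleyEnvironment instance.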
return &g_dbenvs .emplace(std::piecewise_construct, std::forward_as_tuple(env_directory.string()), std::forward_as_tuple(env_directory)) .first->second; } // // BerkeleyBatch // void BerkeleyEnvironment::Close() { if (!fDbEnvInit) { return; } fDbEnvInit = false; for (auto &db : mapDb) { auto count = mapFileUseCount.find(db.first); assert(count == mapFileUseCount.end() || count->second == 0); if (db.second) { db.second->close(0); delete db.second; db.second = nullptr; } } int ret = dbenv->close(0); if (ret != 0) { LogPrintf("BerkeleyEnvironment::EnvShutdown: Error %d shutting down " "database environment: %s\n", ret, DbEnv::strerror(ret)); } if (!fMockDb) { DbEnv(u_int32_t(0)).remove(strPath.c_str(), 0); } } void BerkeleyEnvironment::Reset() { dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS)); fDbEnvInit = false; fMockDb = false; } BerkeleyEnvironment::BerkeleyEnvironment(const fs::path &dir_path) : strPath(dir_path.string()) { Reset(); } BerkeleyEnvironment::~BerkeleyEnvironment() { Close(); } bool BerkeleyEnvironment::Open(bool retry) { if (fDbEnvInit) { return true; } boost::this_thread::interruption_point(); fs::path pathIn = strPath; TryCreateDirectories(pathIn); if (!LockDirectory(pathIn, ".walletlock")) { LogPrintf("Cannot obtain a lock on wallet directory %s. Another " "instance of bitcoin may be using it.\n", strPath); return false; } fs::path pathLogDir = pathIn / "database"; TryCreateDirectories(pathLogDir); fs::path pathErrorFile = pathIn / "db.log"; LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string()); unsigned int nEnvFlags = 0; if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB)) { nEnvFlags |= DB_PRIVATE; } dbenv->set_lg_dir(pathLogDir.string().c_str()); // 1 MiB should be enough for just the wallet dbenv->set_cachesize(0, 0x100000, 1); dbenv->set_lg_bsize(0x10000); dbenv->set_lg_max(1048576); dbenv->set_lk_max_locks(40000); dbenv->set_lk_max_objects(40000); /// debug dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1); dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1); int ret = dbenv->open(strPath.c_str(), DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_RECOVER | nEnvFlags, S_IRUSR | S_IWUSR); if (ret != 0) { dbenv->close(0); LogPrintf("BerkeleyEnvironment::Open: Error %d opening database " "environment: %s\n", ret, DbEnv::strerror(ret)); if (retry) { // try moving the database env out of the way fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime()); try { fs::rename(pathLogDir, pathDatabaseBak); LogPrintf("Moved old %s to %s. 
Retrying.\n", pathLogDir.string(), pathDatabaseBak.string()); } catch (const fs::filesystem_error &) { // failure is ok (well, not really, but it's not worse than what // we started with) } // try opening it again one more time if (!Open(false /* retry */)) { // if it still fails, it probably means we can't even create the // database env return false; } } else { return false; } } fDbEnvInit = true; fMockDb = false; return true; } void BerkeleyEnvironment::MakeMock() { if (fDbEnvInit) { throw std::runtime_error( "BerkeleyEnvironment::MakeMock: Already initialized"); } boost::this_thread::interruption_point(); LogPrint(BCLog::DB, "BerkeleyEnvironment::MakeMock\n"); dbenv->set_cachesize(1, 0, 1); dbenv->set_lg_bsize(10485760 * 4); dbenv->set_lg_max(10485760); dbenv->set_lk_max_locks(10000); dbenv->set_lk_max_objects(10000); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->log_set_config(DB_LOG_IN_MEMORY, 1); int ret = dbenv->open(nullptr, DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_PRIVATE, S_IRUSR | S_IWUSR); if (ret > 0) { throw std::runtime_error( strprintf("BerkeleyEnvironment::MakeMock: Error %d opening " "database environment.", ret)); } fDbEnvInit = true; fMockDb = true; } BerkeleyEnvironment::VerifyResult BerkeleyEnvironment::Verify(const std::string &strFile, recoverFunc_type recoverFunc, std::string &out_backup_filename) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) { return VerifyResult::VERIFY_OK; } else if (recoverFunc == nullptr) { return VerifyResult::RECOVER_FAIL; } // Try to recover: bool fRecovered = (*recoverFunc)(fs::path(strPath) / strFile, out_backup_filename); return (fRecovered ? VerifyResult::RECOVER_OK : VerifyResult::RECOVER_FAIL); } bool BerkeleyBatch::Recover(const fs::path &file_path, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &newFilename) { std::string filename; BerkeleyEnvironment *env = GetWalletEnv(file_path, filename); // Recovery procedure: // Move wallet file to walletfilename.timestamp.bak // Call Salvage with fAggressive=true to get as much data as possible. // Rewrite salvaged data to fresh wallet file. // Set -rescan so any missing transactions will be found. 
int64_t now = GetTime(); newFilename = strprintf("%s.%d.bak", filename, now); int result = env->dbenv->dbrename(nullptr, filename.c_str(), nullptr, newFilename.c_str(), DB_AUTO_COMMIT); if (result == 0) { LogPrintf("Renamed %s to %s\n", filename, newFilename); } else { LogPrintf("Failed to rename %s to %s\n", filename, newFilename); return false; } std::vector<BerkeleyEnvironment::KeyValPair> salvagedData; bool fSuccess = env->Salvage(newFilename, true, salvagedData); if (salvagedData.empty()) { LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename); return false; } LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size()); std::unique_ptr<Db> pdbCopy = std::make_unique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer filename.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf("Cannot create database file %s\n", filename); pdbCopy->close(0); return false; } DbTxn *ptxn = env->TxnBegin(); for (BerkeleyEnvironment::KeyValPair &row : salvagedData) { if (recoverKVcallback) { CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION); CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION); if (!(*recoverKVcallback)(callbackDataIn, ssKey, ssValue)) { continue; } } Dbt datKey(&row.first[0], row.first.size()); Dbt datValue(&row.second[0], row.second.size()); int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } ptxn->commit(0); pdbCopy->close(0); return fSuccess; } bool BerkeleyBatch::VerifyEnvironment(const fs::path &file_path, std::string &errorStr) { std::string walletFile; BerkeleyEnvironment *env = GetWalletEnv(file_path, walletFile); fs::path walletDir = env->Directory(); LogPrintf("Using BerkeleyDB version %s\n", DbEnv::version(0, 0, 0)); LogPrintf("Using wallet %s\n", walletFile); // Wallet file must be a plain filename without a directory if (walletFile != fs::basename(walletFile) + fs::extension(walletFile)) { errorStr = strprintf(_("Wallet %s resides outside wallet directory %s"), walletFile, walletDir.string()); return false; } if (!env->Open(true /* retry */)) { errorStr = strprintf( _("Error initializing wallet database environment %s!"), walletDir); return false; } return true; } bool BerkeleyBatch::VerifyDatabaseFile( const fs::path &file_path, std::string &warningStr, std::string &errorStr, BerkeleyEnvironment::recoverFunc_type recoverFunc) { std::string walletFile; BerkeleyEnvironment *env = GetWalletEnv(file_path, walletFile); fs::path walletDir = env->Directory(); if (fs::exists(walletDir / walletFile)) { std::string backup_filename; BerkeleyEnvironment::VerifyResult r = env->Verify(walletFile, recoverFunc, backup_filename); if (r == BerkeleyEnvironment::VerifyResult::RECOVER_OK) { warningStr = strprintf( _("Warning: Wallet file corrupt, data salvaged! 
Original %s " "saved as %s in %s; if your balance or transactions are " "incorrect you should restore from a backup."), walletFile, backup_filename, walletDir); } if (r == BerkeleyEnvironment::VerifyResult::RECOVER_FAIL) { errorStr = strprintf(_("%s corrupt, salvage failed"), walletFile); return false; } } // also return true if files does not exists return true; } /* End of headers, beginning of key/value data */ static const char *HEADER_END = "HEADER=END"; /* End of key/value data */ static const char *DATA_END = "DATA=END"; bool BerkeleyEnvironment::Salvage( const std::string &strFile, bool fAggressive, std::vector<BerkeleyEnvironment::KeyValPair> &vResult) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); u_int32_t flags = DB_SALVAGE; if (fAggressive) { flags |= DB_AGGRESSIVE; } std::stringstream strDump; Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, &strDump, flags); if (result == DB_VERIFY_BAD) { LogPrintf("BerkeleyEnvironment::Salvage: Database salvage found " "errors, all data may not be recoverable.\n"); if (!fAggressive) { LogPrintf("BerkeleyEnvironment::Salvage: Rerun with aggressive " "mode to ignore errors and continue.\n"); return false; } } if (result != 0 && result != DB_VERIFY_BAD) { LogPrintf("BerkeleyEnvironment::Salvage: Database salvage failed with " "result %d.\n", result); return false; } // Format of bdb dump is ascii lines: // header lines... // HEADER=END // hexadecimal key // hexadecimal value // ... repeated // DATA=END std::string strLine; while (!strDump.eof() && strLine != HEADER_END) { // Skip past header getline(strDump, strLine); } std::string keyHex, valueHex; while (!strDump.eof() && keyHex != DATA_END) { getline(strDump, keyHex); if (keyHex != DATA_END) { if (strDump.eof()) { break; } getline(strDump, valueHex); if (valueHex == DATA_END) { LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Number of " "keys in data does not match number of values.\n"); break; } vResult.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex))); } } if (keyHex != DATA_END) { LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Unexpected end of " "file while reading salvage output.\n"); return false; } return (result == 0); } void BerkeleyEnvironment::CheckpointLSN(const std::string &strFile) { dbenv->txn_checkpoint(0, 0, 0); if (fMockDb) { return; } dbenv->lsn_reset(strFile.c_str(), 0); } BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase &database, const char *pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr) { fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); fFlushOnClose = fFlushOnCloseIn; env = database.env; if (database.IsDummy()) { return; } const std::string &strFilename = database.strFile; bool fCreate = strchr(pszMode, 'c') != nullptr; unsigned int nFlags = DB_THREAD; if (fCreate) { nFlags |= DB_CREATE; } { LOCK(cs_db); if (!env->Open(false /* retry */)) { throw std::runtime_error( "BerkeleyBatch: Failed to open database environment."); } pdb = env->mapDb[strFilename]; if (pdb == nullptr) { int ret; std::unique_ptr<Db> pdb_temp = std::make_unique<Db>(env->dbenv.get(), 0); bool fMockDb = env->IsMock(); if (fMockDb) { DbMpoolFile *mpf = pdb_temp->get_mpf(); ret = mpf->set_flags(DB_MPOOL_NOFILE, 1); if (ret != 0) { throw std::runtime_error( strprintf("BerkeleyBatch: Failed to configure for no " "temp file backing for database %s", strFilename)); } } ret = pdb_temp->open( nullptr, // Txn pointer fMockDb ? nullptr : strFilename.c_str(), // Filename fMockDb ? 
strFilename.c_str() : "main", // Logical db name DB_BTREE, // Database type nFlags, // Flags 0); if (ret != 0) { throw std::runtime_error( strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename)); } // Call CheckUniqueFileid on the containing BDB environment to // avoid BDB data consistency bugs that happen when different data // files in the same environment have the same fileid. // // Also call CheckUniqueFileid on all the other g_dbenvs to prevent // bitcoin from opening the same data file through another // environment when the file is referenced through equivalent but // not obviously identical symlinked or hard linked or bind mounted // paths. In the future a more relaxed check for equal inode and // device ids could be done instead, which would allow opening // different backup copies of a wallet at the same time. Maybe even // more ideally, an exclusive lock for accessing the database could // be implemented, so no equality checks are needed at all. (Newer // versions of BDB have an set_lk_exclusive method for this // purpose, but the older version we use does not.) for (auto &dbenv : g_dbenvs) { CheckUniqueFileid(dbenv.second, strFilename, *pdb_temp); } pdb = pdb_temp.release(); env->mapDb[strFilename] = pdb; if (fCreate && !Exists(std::string("version"))) { bool fTmp = fReadOnly; fReadOnly = false; WriteVersion(CLIENT_VERSION); fReadOnly = fTmp; } } ++env->mapFileUseCount[strFilename]; strFile = strFilename; } } void BerkeleyBatch::Flush() { if (activeTxn) { return; } // Flush database activity from memory pool to disk log unsigned int nMinutes = 0; if (fReadOnly) { nMinutes = 1; } env->dbenv->txn_checkpoint( nMinutes ? gArgs.GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 : 0, nMinutes, 0); } void BerkeleyDatabase::IncrementUpdateCounter() { ++nUpdateCounter; } void BerkeleyBatch::Close() { if (!pdb) { return; } if (activeTxn) { activeTxn->abort(); } activeTxn = nullptr; pdb = nullptr; if (fFlushOnClose) { Flush(); } LOCK(cs_db); --env->mapFileUseCount[strFile]; } void BerkeleyEnvironment::CloseDb(const std::string &strFile) { LOCK(cs_db); if (mapDb[strFile] != nullptr) { // Close the database handle Db *pdb = mapDb[strFile]; pdb->close(0); delete pdb; mapDb[strFile] = nullptr; } } bool BerkeleyBatch::Rewrite(BerkeleyDatabase &database, const char *pszSkip) { if (database.IsDummy()) { return true; } BerkeleyEnvironment *env = database.env; const std::string &strFile = database.strFile; while (true) { { LOCK(cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); bool fSuccess = true; LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile); std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} BerkeleyBatch db(database, "r"); std::unique_ptr<Db> pdbCopy = std::make_unique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer strFileRes.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf("BerkeleyBatch::Rewrite: Can't create " "database file %s\n", strFileRes); fSuccess = false; } Dbc *pcursor = db.GetCursor(); if (pcursor) while (fSuccess) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue); if (ret1 == DB_NOTFOUND) { pcursor->close(); break; } if (ret1 != 0) { 
pcursor->close(); fSuccess = false; break; } if (pszSkip && strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0) { continue; } if (strncmp(ssKey.data(), "\x07version", 8) == 0) { // Update version: ssValue.clear(); ssValue << CLIENT_VERSION; } Dbt datKey(ssKey.data(), ssKey.size()); Dbt datValue(ssValue.data(), ssValue.size()); int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } if (fSuccess) { db.Close(); env->CloseDb(strFile); if (pdbCopy->close(0)) { fSuccess = false; } } else { pdbCopy->close(0); } } if (fSuccess) { Db dbA(env->dbenv.get(), 0); if (dbA.remove(strFile.c_str(), nullptr, 0)) { fSuccess = false; } Db dbB(env->dbenv.get(), 0); if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0)) { fSuccess = false; } } if (!fSuccess) { LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite " "database file %s\n", strFileRes); } return fSuccess; } } MilliSleep(100); } - return false; } void BerkeleyEnvironment::Flush(bool fShutdown) { int64_t nStart = GetTimeMillis(); // Flush log data to the actual data file on all files that are not in use LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: Flush(%s)%s\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started"); if (!fDbEnvInit) { return; } { LOCK(cs_db); std::map<std::string, int>::iterator mi = mapFileUseCount.begin(); while (mi != mapFileUseCount.end()) { std::string strFile = (*mi).first; int nRefCount = (*mi).second; LogPrint( BCLog::DB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount); if (nRefCount == 0) { // Move log data to the dat file CloseDb(strFile); LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile); dbenv->txn_checkpoint(0, 0, 0); LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s detach\n", strFile); if (!fMockDb) { dbenv->lsn_reset(strFile.c_str(), 0); } LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s closed\n", strFile); mapFileUseCount.erase(mi++); } else { mi++; } } LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? 
"" : " database not started", GetTimeMillis() - nStart); if (fShutdown) { char **listp; if (mapFileUseCount.empty()) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(); if (!fMockDb) { fs::remove_all(fs::path(strPath) / "database"); } } } } } bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase &database) { if (database.IsDummy()) { return true; } bool ret = false; BerkeleyEnvironment *env = database.env; const std::string &strFile = database.strFile; TRY_LOCK(cs_db, lockDb); if (lockDb) { // Don't do this if any databases are in use int nRefCount = 0; std::map<std::string, int>::iterator mit = env->mapFileUseCount.begin(); while (mit != env->mapFileUseCount.end()) { nRefCount += (*mit).second; mit++; } if (nRefCount == 0) { boost::this_thread::interruption_point(); std::map<std::string, int>::iterator mi = env->mapFileUseCount.find(strFile); if (mi != env->mapFileUseCount.end()) { LogPrint(BCLog::DB, "Flushing %s\n", strFile); int64_t nStart = GetTimeMillis(); // Flush wallet file so it's self contained env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(mi++); LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart); ret = true; } } } return ret; } bool BerkeleyDatabase::Rewrite(const char *pszSkip) { return BerkeleyBatch::Rewrite(*this, pszSkip); } bool BerkeleyDatabase::Backup(const std::string &strDest) { if (IsDummy()) { return false; } while (true) { { LOCK(cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); // Copy wallet file. fs::path pathSrc = GetWalletDir() / strFile; fs::path pathDest(strDest); if (fs::is_directory(pathDest)) { pathDest /= strFile; } try { if (fs::equivalent(pathSrc, pathDest)) { LogPrintf("cannot backup to wallet source file %s\n", pathDest.string()); return false; } fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists); LogPrintf("copied %s to %s\n", strFile, pathDest.string()); return true; } catch (const fs::filesystem_error &e) { LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), e.what()); return false; } } } MilliSleep(100); } - return false; } void BerkeleyDatabase::Flush(bool shutdown) { if (!IsDummy()) { env->Flush(shutdown); } }