diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index 77b98c709..ae410f74c 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -1,984 +1,984 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include // boost::this_thread::interruption_point() (mingw) #include #ifndef WIN32 #include #endif namespace { //! Make sure database has a unique fileid within the environment. If it //! doesn't, throw an error. BDB caches do not work properly when more than one //! open database has the same fileid (values written to one database may show //! up in reads to other databases). //! //! BerkeleyDB generates unique fileids by default //! (https://docs.oracle.com/cd/E17275_01/html/programmer_reference/program_copy.html), //! so bitcoin should never create different databases with the same fileid, but //! this error can be triggered if users manually copy database files. void CheckUniqueFileid(const BerkeleyEnvironment &env, const std::string &filename, Db &db, WalletDatabaseFileId &fileid) { if (env.IsMock()) { return; } int ret = db.get_mpf()->get_fileid(fileid.value); if (ret != 0) { throw std::runtime_error(strprintf( "BerkeleyBatch: Can't open database %s (get_fileid failed with %d)", filename, ret)); } for (const auto &item : env.m_fileids) { if (fileid == item.second && &fileid != &item.second) { throw std::runtime_error(strprintf( "BerkeleyBatch: Can't open database %s (duplicates fileid %s " "from %s)", filename, HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first)); } } } RecursiveMutex cs_db; //! Map from directory name to db environment. std::map> g_dbenvs GUARDED_BY(cs_db); } // namespace bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId &rhs) const { return memcmp(value, &rhs.value, sizeof(value)) == 0; } static void SplitWalletPath(const fs::path &wallet_path, fs::path &env_directory, std::string &database_filename) { if (fs::is_regular_file(wallet_path)) { // Special case for backwards compatibility: if wallet path points to an // existing file, treat it as the path to a BDB data file in a parent // directory that also contains BDB log files. env_directory = wallet_path.parent_path(); database_filename = wallet_path.filename().string(); } else { // Normal case: Interpret wallet path as a directory path containing // data and log files. env_directory = wallet_path; database_filename = "wallet.dat"; } } bool IsWalletLoaded(const fs::path &wallet_path) { fs::path env_directory; std::string database_filename; SplitWalletPath(wallet_path, env_directory, database_filename); LOCK(cs_db); auto env = g_dbenvs.find(env_directory.string()); if (env == g_dbenvs.end()) { return false; } auto database = env->second.lock(); return database && database->IsDatabaseLoaded(database_filename); } /** * @param[in] wallet_path Path to wallet directory. Or (for backwards * compatibility only) a path to a berkeley btree data file inside a wallet * directory. * @param[out] database_filename Filename of berkeley btree data file inside the * wallet directory. * @return A shared pointer to the BerkeleyEnvironment object for the wallet * directory, never empty because ~BerkeleyEnvironment erases the weak pointer * from the g_dbenvs map. 
* @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the * directory path key was not already in the map. */ std::shared_ptr GetWalletEnv(const fs::path &wallet_path, std::string &database_filename) { fs::path env_directory; SplitWalletPath(wallet_path, env_directory, database_filename); LOCK(cs_db); auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr()); if (inserted.second) { auto env = std::make_shared(env_directory.string()); inserted.first->second = env; return env; } return inserted.first->second.lock(); } // // BerkeleyBatch // void BerkeleyEnvironment::Close() { if (!fDbEnvInit) { return; } fDbEnvInit = false; for (auto &db : m_databases) { auto count = mapFileUseCount.find(db.first); assert(count == mapFileUseCount.end() || count->second == 0); BerkeleyDatabase &database = db.second.get(); if (database.m_db) { database.m_db->close(0); database.m_db.reset(); } } FILE *error_file = nullptr; dbenv->get_errfile(&error_file); int ret = dbenv->close(0); if (ret != 0) { LogPrintf("BerkeleyEnvironment::Close: Error %d closing database " "environment: %s\n", ret, DbEnv::strerror(ret)); } if (!fMockDb) { DbEnv(u_int32_t(0)).remove(strPath.c_str(), 0); } if (error_file) { fclose(error_file); } UnlockDirectory(strPath, ".walletlock"); } void BerkeleyEnvironment::Reset() { dbenv.reset(new DbEnv(DB_CXX_NO_EXCEPTIONS)); fDbEnvInit = false; fMockDb = false; } BerkeleyEnvironment::BerkeleyEnvironment(const fs::path &dir_path) : strPath(dir_path.string()) { Reset(); } BerkeleyEnvironment::~BerkeleyEnvironment() { LOCK(cs_db); g_dbenvs.erase(strPath); Close(); } bool BerkeleyEnvironment::Open(bool retry) { if (fDbEnvInit) { return true; } boost::this_thread::interruption_point(); fs::path pathIn = strPath; TryCreateDirectories(pathIn); if (!LockDirectory(pathIn, ".walletlock")) { LogPrintf("Cannot obtain a lock on wallet directory %s. Another " "instance of bitcoin may be using it.\n", strPath); return false; } fs::path pathLogDir = pathIn / "database"; TryCreateDirectories(pathLogDir); fs::path pathErrorFile = pathIn / "db.log"; LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string()); unsigned int nEnvFlags = 0; if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB)) { nEnvFlags |= DB_PRIVATE; } dbenv->set_lg_dir(pathLogDir.string().c_str()); // 1 MiB should be enough for just the wallet dbenv->set_cachesize(0, 0x100000, 1); dbenv->set_lg_bsize(0x10000); dbenv->set_lg_max(1048576); dbenv->set_lk_max_locks(40000); dbenv->set_lk_max_objects(40000); /// debug dbenv->set_errfile(fsbridge::fopen(pathErrorFile, "a")); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->set_flags(DB_TXN_WRITE_NOSYNC, 1); dbenv->log_set_config(DB_LOG_AUTO_REMOVE, 1); int ret = dbenv->open(strPath.c_str(), DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_RECOVER | nEnvFlags, S_IRUSR | S_IWUSR); if (ret != 0) { LogPrintf("BerkeleyEnvironment::Open: Error %d opening database " "environment: %s\n", ret, DbEnv::strerror(ret)); int ret2 = dbenv->close(0); if (ret2 != 0) { LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed " "database environment: %s\n", ret2, DbEnv::strerror(ret2)); } Reset(); if (retry) { // try moving the database env out of the way fs::path pathDatabaseBak = pathIn / strprintf("database.%d.bak", GetTime()); try { fs::rename(pathLogDir, pathDatabaseBak); LogPrintf("Moved old %s to %s. 
Retrying.\n", pathLogDir.string(), pathDatabaseBak.string()); } catch (const fs::filesystem_error &) { // failure is ok (well, not really, but it's not worse than what // we started with) } // try opening it again one more time if (!Open(false /* retry */)) { // if it still fails, it probably means we can't even create the // database env return false; } } else { return false; } } fDbEnvInit = true; fMockDb = false; return true; } //! Construct an in-memory mock Berkeley environment for testing and as a //! place-holder for g_dbenvs emplace BerkeleyEnvironment::BerkeleyEnvironment() { Reset(); boost::this_thread::interruption_point(); LogPrint(BCLog::DB, "BerkeleyEnvironment::MakeMock\n"); dbenv->set_cachesize(1, 0, 1); dbenv->set_lg_bsize(10485760 * 4); dbenv->set_lg_max(10485760); dbenv->set_lk_max_locks(10000); dbenv->set_lk_max_objects(10000); dbenv->set_flags(DB_AUTO_COMMIT, 1); dbenv->log_set_config(DB_LOG_IN_MEMORY, 1); int ret = dbenv->open(nullptr, DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD | DB_PRIVATE, S_IRUSR | S_IWUSR); if (ret > 0) { throw std::runtime_error( strprintf("BerkeleyEnvironment::MakeMock: Error %d opening " "database environment.", ret)); } fDbEnvInit = true; fMockDb = true; } BerkeleyEnvironment::VerifyResult BerkeleyEnvironment::Verify(const std::string &strFile, recoverFunc_type recoverFunc, std::string &out_backup_filename) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, nullptr, 0); if (result == 0) { return VerifyResult::VERIFY_OK; } else if (recoverFunc == nullptr) { return VerifyResult::RECOVER_FAIL; } // Try to recover: bool fRecovered = (*recoverFunc)(fs::path(strPath) / strFile, out_backup_filename); return (fRecovered ? VerifyResult::RECOVER_OK : VerifyResult::RECOVER_FAIL); } bool BerkeleyBatch::Recover(const fs::path &file_path, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &newFilename) { std::string filename; std::shared_ptr env = GetWalletEnv(file_path, filename); // Recovery procedure: // Move wallet file to walletfilename.timestamp.bak // Call Salvage with fAggressive=true to get as much data as possible. // Rewrite salvaged data to fresh wallet file. // Set -rescan so any missing transactions will be found. 
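// The code below follows that outline step by step: dbrename() moves the live
// data file aside under a timestamped "<name>.<time>.bak" name, Salvage() is
// run in aggressive mode (DB_AGGRESSIVE) to dump whatever key/value pairs BDB
// can still read, and the surviving rows are re-inserted into a fresh Db
// inside a single transaction, skipping any row that the optional
// recoverKVcallback filter rejects.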
int64_t now = GetTime(); newFilename = strprintf("%s.%d.bak", filename, now); int result = env->dbenv->dbrename(nullptr, filename.c_str(), nullptr, newFilename.c_str(), DB_AUTO_COMMIT); if (result == 0) { LogPrintf("Renamed %s to %s\n", filename, newFilename); } else { LogPrintf("Failed to rename %s to %s\n", filename, newFilename); return false; } std::vector salvagedData; bool fSuccess = env->Salvage(newFilename, true, salvagedData); if (salvagedData.empty()) { LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename); return false; } LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size()); std::unique_ptr pdbCopy = std::make_unique(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer filename.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf("Cannot create database file %s\n", filename); pdbCopy->close(0); return false; } DbTxn *ptxn = env->TxnBegin(); for (BerkeleyEnvironment::KeyValPair &row : salvagedData) { if (recoverKVcallback) { CDataStream ssKey(row.first, SER_DISK, CLIENT_VERSION); CDataStream ssValue(row.second, SER_DISK, CLIENT_VERSION); if (!(*recoverKVcallback)(callbackDataIn, ssKey, ssValue)) { continue; } } Dbt datKey(&row.first[0], row.first.size()); Dbt datValue(&row.second[0], row.second.size()); int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } ptxn->commit(0); pdbCopy->close(0); return fSuccess; } bool BerkeleyBatch::VerifyEnvironment(const fs::path &file_path, std::string &errorStr) { std::string walletFile; std::shared_ptr env = GetWalletEnv(file_path, walletFile); fs::path walletDir = env->Directory(); LogPrintf("Using BerkeleyDB version %s\n", DbEnv::version(nullptr, nullptr, nullptr)); LogPrintf("Using wallet %s\n", walletFile); // Wallet file must be a plain filename without a directory if (walletFile != fs::basename(walletFile) + fs::extension(walletFile)) { errorStr = strprintf( _("Wallet %s resides outside wallet directory %s").translated, walletFile, walletDir.string()); return false; } if (!env->Open(true /* retry */)) { errorStr = strprintf( _("Error initializing wallet database environment %s!").translated, walletDir); return false; } return true; } bool BerkeleyBatch::VerifyDatabaseFile( const fs::path &file_path, std::string &warningStr, std::string &errorStr, BerkeleyEnvironment::recoverFunc_type recoverFunc) { std::string walletFile; std::shared_ptr env = GetWalletEnv(file_path, walletFile); fs::path walletDir = env->Directory(); if (fs::exists(walletDir / walletFile)) { std::string backup_filename; BerkeleyEnvironment::VerifyResult r = env->Verify(walletFile, recoverFunc, backup_filename); if (r == BerkeleyEnvironment::VerifyResult::RECOVER_OK) { warningStr = strprintf( _("Warning: Wallet file corrupt, data salvaged! 
Original %s " "saved as %s in %s; if your balance or transactions are " "incorrect you should restore from a backup.") .translated, walletFile, backup_filename, walletDir); } if (r == BerkeleyEnvironment::VerifyResult::RECOVER_FAIL) { errorStr = strprintf(_("%s corrupt, salvage failed").translated, walletFile); return false; } } // also return true if files does not exists return true; } /* End of headers, beginning of key/value data */ static const char *HEADER_END = "HEADER=END"; /* End of key/value data */ static const char *DATA_END = "DATA=END"; bool BerkeleyEnvironment::Salvage( const std::string &strFile, bool fAggressive, std::vector &vResult) { LOCK(cs_db); assert(mapFileUseCount.count(strFile) == 0); u_int32_t flags = DB_SALVAGE; if (fAggressive) { flags |= DB_AGGRESSIVE; } std::stringstream strDump; Db db(dbenv.get(), 0); int result = db.verify(strFile.c_str(), nullptr, &strDump, flags); if (result == DB_VERIFY_BAD) { LogPrintf("BerkeleyEnvironment::Salvage: Database salvage found " "errors, all data may not be recoverable.\n"); if (!fAggressive) { LogPrintf("BerkeleyEnvironment::Salvage: Rerun with aggressive " "mode to ignore errors and continue.\n"); return false; } } if (result != 0 && result != DB_VERIFY_BAD) { LogPrintf("BerkeleyEnvironment::Salvage: Database salvage failed with " "result %d.\n", result); return false; } // Format of bdb dump is ascii lines: // header lines... // HEADER=END // hexadecimal key // hexadecimal value // ... repeated // DATA=END std::string strLine; while (!strDump.eof() && strLine != HEADER_END) { // Skip past header getline(strDump, strLine); } std::string keyHex, valueHex; while (!strDump.eof() && keyHex != DATA_END) { getline(strDump, keyHex); if (keyHex != DATA_END) { if (strDump.eof()) { break; } getline(strDump, valueHex); if (valueHex == DATA_END) { LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Number of " "keys in data does not match number of values.\n"); break; } vResult.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex))); } } if (keyHex != DATA_END) { LogPrintf("BerkeleyEnvironment::Salvage: WARNING: Unexpected end of " "file while reading salvage output.\n"); return false; } return (result == 0); } void BerkeleyEnvironment::CheckpointLSN(const std::string &strFile) { dbenv->txn_checkpoint(0, 0, 0); if (fMockDb) { return; } dbenv->lsn_reset(strFile.c_str(), 0); } BerkeleyBatch::BerkeleyBatch(BerkeleyDatabase &database, const char *pszMode, bool fFlushOnCloseIn) : pdb(nullptr), activeTxn(nullptr) { fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w')); fFlushOnClose = fFlushOnCloseIn; env = database.env.get(); if (database.IsDummy()) { return; } const std::string &strFilename = database.strFile; bool fCreate = strchr(pszMode, 'c') != nullptr; unsigned int nFlags = DB_THREAD; if (fCreate) { nFlags |= DB_CREATE; } { LOCK(cs_db); if (!env->Open(false /* retry */)) { throw std::runtime_error( "BerkeleyBatch: Failed to open database environment."); } pdb = database.m_db.get(); if (pdb == nullptr) { int ret; std::unique_ptr pdb_temp = std::make_unique(env->dbenv.get(), 0); bool fMockDb = env->IsMock(); if (fMockDb) { DbMpoolFile *mpf = pdb_temp->get_mpf(); ret = mpf->set_flags(DB_MPOOL_NOFILE, 1); if (ret != 0) { throw std::runtime_error( strprintf("BerkeleyBatch: Failed to configure for no " "temp file backing for database %s", strFilename)); } } ret = pdb_temp->open( nullptr, // Txn pointer fMockDb ? nullptr : strFilename.c_str(), // Filename fMockDb ? 
strFilename.c_str() : "main", // Logical db name DB_BTREE, // Database type nFlags, // Flags 0); if (ret != 0) { throw std::runtime_error( strprintf("BerkeleyBatch: Error %d, can't open database %s", ret, strFilename)); } // Call CheckUniqueFileid on the containing BDB environment to // avoid BDB data consistency bugs that happen when different data // files in the same environment have the same fileid. // // Also call CheckUniqueFileid on all the other g_dbenvs to prevent // bitcoin from opening the same data file through another // environment when the file is referenced through equivalent but // not obviously identical symlinked or hard linked or bind mounted // paths. In the future a more relaxed check for equal inode and // device ids could be done instead, which would allow opening // different backup copies of a wallet at the same time. Maybe even // more ideally, an exclusive lock for accessing the database could // be implemented, so no equality checks are needed at all. (Newer // versions of BDB have an set_lk_exclusive method for this // purpose, but the older version we use does not.) for (const auto &dbenv : g_dbenvs) { CheckUniqueFileid(*dbenv.second.lock().get(), strFilename, *pdb_temp, this->env->m_fileids[strFilename]); } pdb = pdb_temp.release(); database.m_db.reset(pdb); if (fCreate && !Exists(std::string("version"))) { bool fTmp = fReadOnly; fReadOnly = false; - WriteVersion(CLIENT_VERSION); + Write(std::string("version"), CLIENT_VERSION); fReadOnly = fTmp; } } ++env->mapFileUseCount[strFilename]; strFile = strFilename; } } void BerkeleyBatch::Flush() { if (activeTxn) { return; } // Flush database activity from memory pool to disk log unsigned int nMinutes = 0; if (fReadOnly) { nMinutes = 1; } // env is nullptr for dummy databases (i.e. in tests). Don't actually flush // if env is nullptr so we don't segfault if (env) { env->dbenv->txn_checkpoint( nMinutes ? 
gArgs.GetArg("-dblogsize", DEFAULT_WALLET_DBLOGSIZE) * 1024 : 0, nMinutes, 0); } } void BerkeleyDatabase::IncrementUpdateCounter() { ++nUpdateCounter; } void BerkeleyBatch::Close() { if (!pdb) { return; } if (activeTxn) { activeTxn->abort(); } activeTxn = nullptr; pdb = nullptr; if (fFlushOnClose) { Flush(); } { LOCK(cs_db); --env->mapFileUseCount[strFile]; } env->m_db_in_use.notify_all(); } void BerkeleyEnvironment::CloseDb(const std::string &strFile) { LOCK(cs_db); auto it = m_databases.find(strFile); assert(it != m_databases.end()); BerkeleyDatabase &database = it->second.get(); if (database.m_db) { // Close the database handle database.m_db->close(0); database.m_db.reset(); } } void BerkeleyEnvironment::ReloadDbEnv() { // Make sure that no Db's are in use AssertLockNotHeld(cs_db); std::unique_lock lock(cs_db); m_db_in_use.wait(lock, [this]() { for (auto &count : mapFileUseCount) { if (count.second > 0) { return false; } } return true; }); std::vector filenames; for (auto it : m_databases) { filenames.push_back(it.first); } // Close the individual Db's for (const std::string &filename : filenames) { CloseDb(filename); } // Reset the environment // This will flush and close the environment Flush(true); Reset(); Open(true); } bool BerkeleyBatch::Rewrite(BerkeleyDatabase &database, const char *pszSkip) { if (database.IsDummy()) { return true; } BerkeleyEnvironment *env = database.env.get(); const std::string &strFile = database.strFile; while (true) { { LOCK(cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); bool fSuccess = true; LogPrintf("BerkeleyBatch::Rewrite: Rewriting %s...\n", strFile); std::string strFileRes = strFile + ".rewrite"; { // surround usage of db with extra {} BerkeleyBatch db(database, "r"); std::unique_ptr pdbCopy = std::make_unique(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer strFileRes.c_str(), // Filename "main", // Logical db name DB_BTREE, // Database type DB_CREATE, // Flags 0); if (ret > 0) { LogPrintf("BerkeleyBatch::Rewrite: Can't create " "database file %s\n", strFileRes); fSuccess = false; } Dbc *pcursor = db.GetCursor(); if (pcursor) { while (fSuccess) { CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); int ret1 = db.ReadAtCursor(pcursor, ssKey, ssValue); if (ret1 == DB_NOTFOUND) { pcursor->close(); break; } if (ret1 != 0) { pcursor->close(); fSuccess = false; break; } if (pszSkip && strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0) { continue; } if (strncmp(ssKey.data(), "\x07version", 8) == 0) { // Update version: ssValue.clear(); ssValue << CLIENT_VERSION; } Dbt datKey(ssKey.data(), ssKey.size()); Dbt datValue(ssValue.data(), ssValue.size()); int ret2 = pdbCopy->put(nullptr, &datKey, &datValue, DB_NOOVERWRITE); if (ret2 > 0) { fSuccess = false; } } } if (fSuccess) { db.Close(); env->CloseDb(strFile); if (pdbCopy->close(0)) { fSuccess = false; } } else { pdbCopy->close(0); } } if (fSuccess) { Db dbA(env->dbenv.get(), 0); if (dbA.remove(strFile.c_str(), nullptr, 0)) { fSuccess = false; } Db dbB(env->dbenv.get(), 0); if (dbB.rename(strFileRes.c_str(), nullptr, strFile.c_str(), 0)) { fSuccess = false; } } if (!fSuccess) { LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite " "database file %s\n", strFileRes); } return fSuccess; } } UninterruptibleSleep(std::chrono::milliseconds{100}); } } void 
BerkeleyEnvironment::Flush(bool fShutdown) { int64_t nStart = GetTimeMillis(); // Flush log data to the actual data file on all files that are not in use LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: [%s] Flush(%s)%s\n", strPath, fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started"); if (!fDbEnvInit) { return; } { LOCK(cs_db); std::map::iterator mi = mapFileUseCount.begin(); while (mi != mapFileUseCount.end()) { std::string strFile = (*mi).first; int nRefCount = (*mi).second; LogPrint( BCLog::DB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount); if (nRefCount == 0) { // Move log data to the dat file CloseDb(strFile); LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile); dbenv->txn_checkpoint(0, 0, 0); LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s detach\n", strFile); if (!fMockDb) { dbenv->lsn_reset(strFile.c_str(), 0); } LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: %s closed\n", strFile); mapFileUseCount.erase(mi++); } else { mi++; } } LogPrint(BCLog::DB, "BerkeleyEnvironment::Flush: Flush(%s)%s took %15dms\n", fShutdown ? "true" : "false", fDbEnvInit ? "" : " database not started", GetTimeMillis() - nStart); if (fShutdown) { char **listp; if (mapFileUseCount.empty()) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(); if (!fMockDb) { fs::remove_all(fs::path(strPath) / "database"); } } } } } bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase &database) { if (database.IsDummy()) { return true; } bool ret = false; BerkeleyEnvironment *env = database.env.get(); const std::string &strFile = database.strFile; TRY_LOCK(cs_db, lockDb); if (lockDb) { // Don't do this if any databases are in use int nRefCount = 0; std::map::iterator mit = env->mapFileUseCount.begin(); while (mit != env->mapFileUseCount.end()) { nRefCount += (*mit).second; mit++; } if (nRefCount == 0) { boost::this_thread::interruption_point(); std::map::iterator mi = env->mapFileUseCount.find(strFile); if (mi != env->mapFileUseCount.end()) { LogPrint(BCLog::DB, "Flushing %s\n", strFile); int64_t nStart = GetTimeMillis(); // Flush wallet file so it's self contained env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(mi++); LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart); ret = true; } } } return ret; } bool BerkeleyDatabase::Rewrite(const char *pszSkip) { return BerkeleyBatch::Rewrite(*this, pszSkip); } bool BerkeleyDatabase::Backup(const std::string &strDest) { if (IsDummy()) { return false; } while (true) { { LOCK(cs_db); if (!env->mapFileUseCount.count(strFile) || env->mapFileUseCount[strFile] == 0) { // Flush log data to the dat file env->CloseDb(strFile); env->CheckpointLSN(strFile); env->mapFileUseCount.erase(strFile); // Copy wallet file. 
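// At this point mapFileUseCount showed no open handles, so the database was
// closed and checkpointed above; the on-disk .dat file is now self-contained
// and the plain filesystem copy below yields a consistent snapshot.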
fs::path pathSrc = env->Directory() / strFile; fs::path pathDest(strDest); if (fs::is_directory(pathDest)) { pathDest /= strFile; } try { if (fs::equivalent(pathSrc, pathDest)) { LogPrintf("cannot backup to wallet source file %s\n", pathDest.string()); return false; } fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists); LogPrintf("copied %s to %s\n", strFile, pathDest.string()); return true; } catch (const fs::filesystem_error &e) { LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), fsbridge::get_filesystem_error_message(e)); return false; } } } UninterruptibleSleep(std::chrono::milliseconds{100}); } } void BerkeleyDatabase::Flush(bool shutdown) { if (!IsDummy()) { env->Flush(shutdown); if (shutdown) { LOCK(cs_db); g_dbenvs.erase(env->Directory().string()); env = nullptr; } else { // TODO: To avoid g_dbenvs.erase erasing the environment prematurely // after the first database shutdown when multiple databases are // open in the same environment, should replace raw database `env` // pointers with shared or weak pointers, or else separate the // database and environment shutdowns so environments can be shut // down after databases. env->m_fileids.erase(strFile); } } } void BerkeleyDatabase::ReloadDbEnv() { if (!IsDummy()) { env->ReloadDbEnv(); } } diff --git a/src/wallet/db.h b/src/wallet/db.h index 2c59d5b20..bda79317c 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -1,460 +1,451 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_WALLET_DB_H #define BITCOIN_WALLET_DB_H #include #include #include #include #include #include #include #include #include #include #include #include static const unsigned int DEFAULT_WALLET_DBLOGSIZE = 100; static const bool DEFAULT_WALLET_PRIVDB = true; struct WalletDatabaseFileId { u_int8_t value[DB_FILE_ID_LEN]; bool operator==(const WalletDatabaseFileId &rhs) const; }; class BerkeleyDatabase; class BerkeleyEnvironment { private: bool fDbEnvInit; bool fMockDb; // Don't change into fs::path, as that can result in // shutdown problems/crashes caused by a static initialized internal // pointer. std::string strPath; public: std::unique_ptr dbenv; std::map mapFileUseCount; std::map> m_databases; std::unordered_map m_fileids; std::condition_variable_any m_db_in_use; BerkeleyEnvironment(const fs::path &env_directory); BerkeleyEnvironment(); ~BerkeleyEnvironment(); void Reset(); void MakeMock(); bool IsMock() const { return fMockDb; } bool IsInitialized() const { return fDbEnvInit; } bool IsDatabaseLoaded(const std::string &db_filename) const { return m_databases.find(db_filename) != m_databases.end(); } fs::path Directory() const { return strPath; } /** * Verify that database file strFile is OK. If it is not, call the callback * to try to recover. * This must be called BEFORE strFile is opened. * Returns true if strFile is OK. */ enum class VerifyResult { VERIFY_OK, RECOVER_OK, RECOVER_FAIL }; typedef bool (*recoverFunc_type)(const fs::path &file_path, std::string &out_backup_filename); VerifyResult Verify(const std::string &strFile, recoverFunc_type recoverFunc, std::string &out_backup_filename); /** * Salvage data from a file that Verify says is bad. * fAggressive sets the DB_AGGRESSIVE flag (see berkeley DB->verify() method * documentation). * Appends binary key/value pairs to vResult, returns true if successful. 
* NOTE: reads the entire database into memory, so cannot be used * for huge databases. */ typedef std::pair, std::vector> KeyValPair; bool Salvage(const std::string &strFile, bool fAggressive, std::vector &vResult); bool Open(bool retry); void Close(); void Flush(bool fShutdown); void CheckpointLSN(const std::string &strFile); void CloseDb(const std::string &strFile); void ReloadDbEnv(); DbTxn *TxnBegin(int flags = DB_TXN_WRITE_NOSYNC) { DbTxn *ptxn = nullptr; int ret = dbenv->txn_begin(nullptr, &ptxn, flags); if (!ptxn || ret != 0) return nullptr; return ptxn; } }; /** Return whether a wallet database is currently loaded. */ bool IsWalletLoaded(const fs::path &wallet_path); /** Get BerkeleyEnvironment and database filename given a wallet path. */ std::shared_ptr GetWalletEnv(const fs::path &wallet_path, std::string &database_filename); /** * An instance of this class represents one database. * For BerkeleyDB this is just a (env, strFile) tuple. */ class BerkeleyDatabase { friend class BerkeleyBatch; public: /** Create dummy DB handle */ BerkeleyDatabase() : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(nullptr) {} /** Create DB handle to real database */ BerkeleyDatabase(std::shared_ptr envIn, std::string filename) : nUpdateCounter(0), nLastSeen(0), nLastFlushed(0), nLastWalletUpdate(0), env(std::move(envIn)), strFile(std::move(filename)) { auto inserted = this->env->m_databases.emplace(strFile, std::ref(*this)); assert(inserted.second); } ~BerkeleyDatabase() { if (env) { size_t erased = env->m_databases.erase(strFile); assert(erased == 1); } } /** Return object for accessing database at specified path. */ static std::unique_ptr Create(const fs::path &path) { std::string filename; return std::make_unique(GetWalletEnv(path, filename), std::move(filename)); } /** * Return object for accessing dummy database with no read/write * capabilities. */ static std::unique_ptr CreateDummy() { return std::make_unique(); } /** * Return object for accessing temporary in-memory database. */ static std::unique_ptr CreateMock() { return std::make_unique( std::make_shared(), ""); } /** * Rewrite the entire database on disk, with the exception of key pszSkip if * non-zero */ bool Rewrite(const char *pszSkip = nullptr); /** * Back up the entire database to a file. */ bool Backup(const std::string &strDest); /** * Make sure all changes are flushed to disk. */ void Flush(bool shutdown); void IncrementUpdateCounter(); void ReloadDbEnv(); std::atomic nUpdateCounter; unsigned int nLastSeen; unsigned int nLastFlushed; int64_t nLastWalletUpdate; /** * Pointer to shared database environment. * * Normally there is only one BerkeleyDatabase object per * BerkeleyEnvivonment, but in the special, backwards compatible case where * multiple wallet BDB data files are loaded from the same directory, this * will point to a shared instance that gets freed when the last data file * is closed. */ std::shared_ptr env; /** * Database pointer. This is initialized lazily and reset during flushes, * so it can be null. */ std::unique_ptr m_db; private: std::string strFile; /** * Return whether this database handle is a dummy for testing. * Only to be used at a low level, application should ideally not care * about this. 
*/ bool IsDummy() { return env == nullptr; } }; /** RAII class that provides access to a Berkeley database */ class BerkeleyBatch { protected: Db *pdb; std::string strFile; DbTxn *activeTxn; bool fReadOnly; bool fFlushOnClose; BerkeleyEnvironment *env; public: explicit BerkeleyBatch(BerkeleyDatabase &database, const char *pszMode = "r+", bool fFlushOnCloseIn = true); ~BerkeleyBatch() { Close(); } BerkeleyBatch(const BerkeleyBatch &) = delete; BerkeleyBatch &operator=(const BerkeleyBatch &) = delete; void Flush(); void Close(); static bool Recover(const fs::path &file_path, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &out_backup_filename); /* flush the wallet passively (TRY_LOCK) ideal to be called periodically */ static bool PeriodicFlush(BerkeleyDatabase &database); /* verifies the database environment */ static bool VerifyEnvironment(const fs::path &file_path, std::string &errorStr); /* verifies the database file */ static bool VerifyDatabaseFile(const fs::path &file_path, std::string &warningStr, std::string &errorStr, BerkeleyEnvironment::recoverFunc_type recoverFunc); public: template bool Read(const K &key, T &value) { if (!pdb) { return false; } // Key CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(1000); ssKey << key; Dbt datKey(ssKey.data(), ssKey.size()); // Read Dbt datValue; datValue.set_flags(DB_DBT_MALLOC); int ret = pdb->get(activeTxn, &datKey, &datValue, 0); memory_cleanse(datKey.get_data(), datKey.get_size()); bool success = false; if (datValue.get_data() != nullptr) { // Unserialize value try { CDataStream ssValue((char *)datValue.get_data(), (char *)datValue.get_data() + datValue.get_size(), SER_DISK, CLIENT_VERSION); ssValue >> value; success = true; } catch (const std::exception &) { // In this case success remains 'false' } // Clear and free memory memory_cleanse(datValue.get_data(), datValue.get_size()); free(datValue.get_data()); } return ret == 0 && success; } template bool Write(const K &key, const T &value, bool fOverwrite = true) { if (!pdb) { return true; } if (fReadOnly) { assert(!"Write called on database in read-only mode"); } // Key CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(1000); ssKey << key; Dbt datKey(ssKey.data(), ssKey.size()); // Value CDataStream ssValue(SER_DISK, CLIENT_VERSION); ssValue.reserve(10000); ssValue << value; Dbt datValue(ssValue.data(), ssValue.size()); // Write int ret = pdb->put(activeTxn, &datKey, &datValue, (fOverwrite ? 
0 : DB_NOOVERWRITE)); // Clear memory in case it was a private key memory_cleanse(datKey.get_data(), datKey.get_size()); memory_cleanse(datValue.get_data(), datValue.get_size()); return (ret == 0); } template bool Erase(const K &key) { if (!pdb) { return false; } if (fReadOnly) { assert(!"Erase called on database in read-only mode"); } // Key CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(1000); ssKey << key; Dbt datKey(ssKey.data(), ssKey.size()); // Erase int ret = pdb->del(activeTxn, &datKey, 0); // Clear memory memory_cleanse(datKey.get_data(), datKey.get_size()); return (ret == 0 || ret == DB_NOTFOUND); } template bool Exists(const K &key) { if (!pdb) { return false; } // Key CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(1000); ssKey << key; Dbt datKey(ssKey.data(), ssKey.size()); // Exists int ret = pdb->exists(activeTxn, &datKey, 0); // Clear memory memory_cleanse(datKey.get_data(), datKey.get_size()); return (ret == 0); } Dbc *GetCursor() { if (!pdb) { return nullptr; } Dbc *pcursor = nullptr; int ret = pdb->cursor(nullptr, &pcursor, 0); if (ret != 0) { return nullptr; } return pcursor; } int ReadAtCursor(Dbc *pcursor, CDataStream &ssKey, CDataStream &ssValue, bool setRange = false) { // Read at cursor Dbt datKey; unsigned int fFlags = DB_NEXT; if (setRange) { datKey.set_data(ssKey.data()); datKey.set_size(ssKey.size()); fFlags = DB_SET_RANGE; } Dbt datValue; datKey.set_flags(DB_DBT_MALLOC); datValue.set_flags(DB_DBT_MALLOC); int ret = pcursor->get(&datKey, &datValue, fFlags); if (ret != 0) { return ret; } else if (datKey.get_data() == nullptr || datValue.get_data() == nullptr) { return 99999; } // Convert to streams ssKey.SetType(SER_DISK); ssKey.clear(); ssKey.write((char *)datKey.get_data(), datKey.get_size()); ssValue.SetType(SER_DISK); ssValue.clear(); ssValue.write((char *)datValue.get_data(), datValue.get_size()); // Clear and free memory memory_cleanse(datKey.get_data(), datKey.get_size()); memory_cleanse(datValue.get_data(), datValue.get_size()); free(datKey.get_data()); free(datValue.get_data()); return 0; } public: bool TxnBegin() { if (!pdb || activeTxn) { return false; } DbTxn *ptxn = env->TxnBegin(); if (!ptxn) { return false; } activeTxn = ptxn; return true; } bool TxnCommit() { if (!pdb || !activeTxn) { return false; } int ret = activeTxn->commit(0); activeTxn = nullptr; return (ret == 0); } bool TxnAbort() { if (!pdb || !activeTxn) { return false; } int ret = activeTxn->abort(); activeTxn = nullptr; return (ret == 0); } - bool ReadVersion(int &nVersion) { - nVersion = 0; - return Read(std::string("version"), nVersion); - } - - bool WriteVersion(int nVersion) { - return Write(std::string("version"), nVersion); - } - static bool Rewrite(BerkeleyDatabase &database, const char *pszSkip = nullptr); }; #endif // BITCOIN_WALLET_DB_H diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 59a7ea0d8..ba6587c93 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -1,807 +1,799 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
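The db.h hunk above removes the ReadVersion()/WriteVersion() convenience wrappers from BerkeleyBatch; the walletdb.cpp hunks below switch the remaining call sites to the generic templated Read()/Write() with an explicit std::string("version") key. A minimal sketch of the resulting calling pattern — it presupposes the wallet's BerkeleyBatch interface from db.h, and the object name "batch" is illustrative rather than taken from this diff:

    // Last client version recorded in the wallet file. Read() leaves
    // last_client untouched when no "version" record exists, so the
    // CLIENT_VERSION default stands in for a missing record.
    int last_client = CLIENT_VERSION;
    batch.Read(std::string("version"), last_client);

    // Bump the stored version after a successful load; this has the same
    // effect as the removed WriteVersion(CLIENT_VERSION) wrapper.
    if (last_client < CLIENT_VERSION) {
        batch.Write(std::string("version"), CLIENT_VERSION);
    }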
#include #include #include #include #include #include #include #include #include #include #include #include #include // // WalletBatch // bool WalletBatch::WriteName(const CTxDestination &address, const std::string &strName) { if (!IsValidDestination(address)) { return false; } return WriteIC(std::make_pair(std::string("name"), EncodeLegacyAddr(address, Params())), strName); } bool WalletBatch::EraseName(const CTxDestination &address) { // This should only be used for sending addresses, never for receiving // addresses, receiving addresses must always have an address book entry if // they're not change return. if (!IsValidDestination(address)) { return false; } return EraseIC(std::make_pair(std::string("name"), EncodeLegacyAddr(address, Params()))); } bool WalletBatch::WritePurpose(const CTxDestination &address, const std::string &strPurpose) { if (!IsValidDestination(address)) { return false; } return WriteIC(std::make_pair(std::string("purpose"), EncodeLegacyAddr(address, Params())), strPurpose); } bool WalletBatch::ErasePurpose(const CTxDestination &address) { if (!IsValidDestination(address)) { return false; } return EraseIC(std::make_pair(std::string("purpose"), EncodeLegacyAddr(address, Params()))); } bool WalletBatch::WriteTx(const CWalletTx &wtx) { return WriteIC(std::make_pair(std::string("tx"), wtx.GetId()), wtx); } bool WalletBatch::EraseTx(uint256 hash) { return EraseIC(std::make_pair(std::string("tx"), hash)); } bool WalletBatch::WriteKeyMetadata(const CKeyMetadata &meta, const CPubKey &pubkey, const bool overwrite) { return WriteIC(std::make_pair(std::string("keymeta"), pubkey), meta, overwrite); } bool WalletBatch::WriteKey(const CPubKey &vchPubKey, const CPrivKey &vchPrivKey, const CKeyMetadata &keyMeta) { if (!WriteKeyMetadata(keyMeta, vchPubKey, false)) { return false; } // hash pubkey/privkey to accelerate wallet load std::vector vchKey; vchKey.reserve(vchPubKey.size() + vchPrivKey.size()); vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end()); return WriteIC( std::make_pair(std::string("key"), vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false); } bool WalletBatch::WriteCryptedKey(const CPubKey &vchPubKey, const std::vector &vchCryptedSecret, const CKeyMetadata &keyMeta) { if (!WriteKeyMetadata(keyMeta, vchPubKey, true)) { return false; } if (!WriteIC(std::make_pair(std::string("ckey"), vchPubKey), vchCryptedSecret, false)) { return false; } EraseIC(std::make_pair(std::string("key"), vchPubKey)); EraseIC(std::make_pair(std::string("wkey"), vchPubKey)); return true; } bool WalletBatch::WriteMasterKey(unsigned int nID, const CMasterKey &kMasterKey) { return WriteIC(std::make_pair(std::string("mkey"), nID), kMasterKey, true); } bool WalletBatch::WriteCScript(const uint160 &hash, const CScript &redeemScript) { return WriteIC(std::make_pair(std::string("cscript"), hash), redeemScript, false); } bool WalletBatch::WriteWatchOnly(const CScript &dest, const CKeyMetadata &keyMeta) { if (!WriteIC(std::make_pair(std::string("watchmeta"), dest), keyMeta)) { return false; } return WriteIC(std::make_pair(std::string("watchs"), dest), '1'); } bool WalletBatch::EraseWatchOnly(const CScript &dest) { if (!EraseIC(std::make_pair(std::string("watchmeta"), dest))) { return false; } return EraseIC(std::make_pair(std::string("watchs"), dest)); } bool WalletBatch::WriteBestBlock(const CBlockLocator &locator) { // Write empty block locator so versions that require a merkle branch // 
automatically rescan WriteIC(std::string("bestblock"), CBlockLocator()); return WriteIC(std::string("bestblock_nomerkle"), locator); } bool WalletBatch::ReadBestBlock(CBlockLocator &locator) { if (m_batch.Read(std::string("bestblock"), locator) && !locator.vHave.empty()) { return true; } return m_batch.Read(std::string("bestblock_nomerkle"), locator); } bool WalletBatch::WriteOrderPosNext(int64_t nOrderPosNext) { return WriteIC(std::string("orderposnext"), nOrderPosNext); } bool WalletBatch::ReadPool(int64_t nPool, CKeyPool &keypool) { return m_batch.Read(std::make_pair(std::string("pool"), nPool), keypool); } bool WalletBatch::WritePool(int64_t nPool, const CKeyPool &keypool) { return WriteIC(std::make_pair(std::string("pool"), nPool), keypool); } bool WalletBatch::ErasePool(int64_t nPool) { return EraseIC(std::make_pair(std::string("pool"), nPool)); } bool WalletBatch::WriteMinVersion(int nVersion) { return WriteIC(std::string("minversion"), nVersion); } class CWalletScanState { public: unsigned int nKeys{0}; unsigned int nCKeys{0}; unsigned int nWatchKeys{0}; unsigned int nKeyMeta{0}; unsigned int m_unknown_records{0}; bool fIsEncrypted{false}; bool fAnyUnordered{false}; std::vector vWalletUpgrade; CWalletScanState() {} }; static bool ReadKeyValue(CWallet *pwallet, CDataStream &ssKey, CDataStream &ssValue, CWalletScanState &wss, std::string &strType, std::string &strErr) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet) { try { // Unserialize // Taking advantage of the fact that pair serialization is just the two // items serialized one after the other. ssKey >> strType; if (strType == "name") { std::string strAddress; ssKey >> strAddress; ssValue >> pwallet ->mapAddressBook[DecodeDestination( strAddress, pwallet->chainParams)] .name; } else if (strType == "purpose") { std::string strAddress; ssKey >> strAddress; ssValue >> pwallet ->mapAddressBook[DecodeDestination( strAddress, pwallet->chainParams)] .purpose; } else if (strType == "tx") { TxId txid; ssKey >> txid; CWalletTx wtx(nullptr /* pwallet */, MakeTransactionRef()); ssValue >> wtx; TxValidationState state; bool isValid = wtx.IsCoinBase() ? CheckCoinbase(*wtx.tx, state) : CheckRegularTransaction(*wtx.tx, state); if (!isValid || wtx.GetId() != txid) { return false; } // Undo serialize changes in 31600 if (31404 <= wtx.fTimeReceivedIsTxTime && wtx.fTimeReceivedIsTxTime <= 31703) { if (!ssValue.empty()) { char fTmp; char fUnused; std::string unused_string; ssValue >> fTmp >> fUnused >> unused_string; strErr = strprintf("LoadWallet() upgrading tx ver=%d %d %s", wtx.fTimeReceivedIsTxTime, fTmp, txid.ToString()); wtx.fTimeReceivedIsTxTime = fTmp; } else { strErr = strprintf("LoadWallet() repairing tx ver=%d %s", wtx.fTimeReceivedIsTxTime, txid.ToString()); wtx.fTimeReceivedIsTxTime = 0; } wss.vWalletUpgrade.push_back(txid); } if (wtx.nOrderPos == -1) { wss.fAnyUnordered = true; } pwallet->LoadToWallet(wtx); } else if (strType == "watchs") { wss.nWatchKeys++; CScript script; ssKey >> script; char fYes; ssValue >> fYes; if (fYes == '1') { pwallet->LoadWatchOnly(script); } } else if (strType == "key" || strType == "wkey") { CPubKey vchPubKey; ssKey >> vchPubKey; if (!vchPubKey.IsValid()) { strErr = "Error reading wallet database: CPubKey corrupt"; return false; } CKey key; CPrivKey pkey; uint256 hash; if (strType == "key") { wss.nKeys++; ssValue >> pkey; } else { CWalletKey wkey; ssValue >> wkey; pkey = wkey.vchPrivKey; } // Old wallets store keys as "key" [pubkey] => [privkey] // ... 
which was slow for wallets with lots of keys, because the // public key is re-derived from the private key using EC operations // as a checksum. Newer wallets store keys as "key"[pubkey] => // [privkey][hash(pubkey,privkey)], which is much faster while // remaining backwards-compatible. try { ssValue >> hash; } catch (...) { } bool fSkipCheck = false; if (!hash.IsNull()) { // hash pubkey/privkey to accelerate wallet load std::vector vchKey; vchKey.reserve(vchPubKey.size() + pkey.size()); vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), pkey.begin(), pkey.end()); if (Hash(vchKey.begin(), vchKey.end()) != hash) { strErr = "Error reading wallet database: CPubKey/CPrivKey " "corrupt"; return false; } fSkipCheck = true; } if (!key.Load(pkey, vchPubKey, fSkipCheck)) { strErr = "Error reading wallet database: CPrivKey corrupt"; return false; } if (!pwallet->LoadKey(key, vchPubKey)) { strErr = "Error reading wallet database: LoadKey failed"; return false; } } else if (strType == "mkey") { unsigned int nID; ssKey >> nID; CMasterKey kMasterKey; ssValue >> kMasterKey; if (pwallet->mapMasterKeys.count(nID) != 0) { strErr = strprintf( "Error reading wallet database: duplicate CMasterKey id %u", nID); return false; } pwallet->mapMasterKeys[nID] = kMasterKey; if (pwallet->nMasterKeyMaxID < nID) { pwallet->nMasterKeyMaxID = nID; } } else if (strType == "ckey") { CPubKey vchPubKey; ssKey >> vchPubKey; if (!vchPubKey.IsValid()) { strErr = "Error reading wallet database: CPubKey corrupt"; return false; } std::vector vchPrivKey; ssValue >> vchPrivKey; wss.nCKeys++; if (!pwallet->LoadCryptedKey(vchPubKey, vchPrivKey)) { strErr = "Error reading wallet database: LoadCryptedKey failed"; return false; } wss.fIsEncrypted = true; } else if (strType == "keymeta") { CPubKey vchPubKey; ssKey >> vchPubKey; CKeyMetadata keyMeta; ssValue >> keyMeta; wss.nKeyMeta++; pwallet->LoadKeyMetadata(vchPubKey.GetID(), keyMeta); } else if (strType == "watchmeta") { CScript script; ssKey >> script; CKeyMetadata keyMeta; ssValue >> keyMeta; wss.nKeyMeta++; pwallet->LoadScriptMetadata(CScriptID(script), keyMeta); } else if (strType == "defaultkey") { // We don't want or need the default key, but if there is one set, // we want to make sure that it is valid so that we can detect // corruption CPubKey vchPubKey; ssValue >> vchPubKey; if (!vchPubKey.IsValid()) { strErr = "Error reading wallet database: Default Key corrupt"; return false; } } else if (strType == "pool") { int64_t nIndex; ssKey >> nIndex; CKeyPool keypool; ssValue >> keypool; pwallet->LoadKeyPool(nIndex, keypool); } else if (strType == "cscript") { uint160 hash; ssKey >> hash; CScript script; ssValue >> script; if (!pwallet->LoadCScript(script)) { strErr = "Error reading wallet database: LoadCScript failed"; return false; } } else if (strType == "orderposnext") { ssValue >> pwallet->nOrderPosNext; } else if (strType == "destdata") { std::string strAddress, strKey, strValue; ssKey >> strAddress; ssKey >> strKey; ssValue >> strValue; pwallet->LoadDestData( DecodeDestination(strAddress, pwallet->chainParams), strKey, strValue); } else if (strType == "hdchain") { CHDChain chain; ssValue >> chain; pwallet->SetHDChain(chain, true); } else if (strType == "flags") { uint64_t flags; ssValue >> flags; if (!pwallet->SetWalletFlags(flags, true)) { strErr = "Error reading wallet database: Unknown non-tolerable " "wallet flags found"; return false; } } else if (strType != "bestblock" && strType != "bestblock_nomerkle" && strType != "minversion" 
&& strType != "acentry" && strType != "version") { wss.m_unknown_records++; } } catch (...) { return false; } return true; } bool WalletBatch::IsKeyType(const std::string &strType) { return (strType == "key" || strType == "wkey" || strType == "mkey" || strType == "ckey"); } DBErrors WalletBatch::LoadWallet(CWallet *pwallet) { CWalletScanState wss; bool fNoncriticalErrors = false; DBErrors result = DBErrors::LOAD_OK; LOCK(pwallet->cs_wallet); try { int nMinVersion = 0; if (m_batch.Read((std::string) "minversion", nMinVersion)) { if (nMinVersion > FEATURE_LATEST) { return DBErrors::TOO_NEW; } pwallet->LoadMinVersion(nMinVersion); } // Get cursor Dbc *pcursor = m_batch.GetCursor(); if (!pcursor) { pwallet->WalletLogPrintf("Error getting wallet database cursor\n"); return DBErrors::CORRUPT; } while (true) { // Read next record CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); int ret = m_batch.ReadAtCursor(pcursor, ssKey, ssValue); if (ret == DB_NOTFOUND) { break; } if (ret != 0) { pwallet->WalletLogPrintf( "Error reading next record from wallet database\n"); return DBErrors::CORRUPT; } // Try to be tolerant of single corrupt records: std::string strType, strErr; if (!ReadKeyValue(pwallet, ssKey, ssValue, wss, strType, strErr)) { // losing keys is considered a catastrophic error, anything else // we assume the user can live with: if (IsKeyType(strType) || strType == "defaultkey") { result = DBErrors::CORRUPT; } else if (strType == "flags") { // Reading the wallet flags can only fail if unknown flags // are present. result = DBErrors::TOO_NEW; } else { // Leave other errors alone, if we try to fix them we might // make things worse. But do warn the user there is // something wrong. fNoncriticalErrors = true; if (strType == "tx") { // Rescan if there is a bad transaction record: gArgs.SoftSetBoolArg("-rescan", true); } } } if (!strErr.empty()) { pwallet->WalletLogPrintf("%s\n", strErr); } } pcursor->close(); } catch (const boost::thread_interrupted &) { throw; } catch (...) { result = DBErrors::CORRUPT; } if (fNoncriticalErrors && result == DBErrors::LOAD_OK) { result = DBErrors::NONCRITICAL_ERROR; } // Any wallet corruption at all: skip any rewriting or upgrading, we don't // want to make it worse. if (result != DBErrors::LOAD_OK) { return result; } // Last client version to open this wallet, was previously the file version // number int last_client = CLIENT_VERSION; - ReadVersion(last_client); + m_batch.Read(std::string("version"), last_client); int wallet_version = pwallet->GetVersion(); pwallet->WalletLogPrintf("Wallet File Version = %d\n", wallet_version > 0 ? wallet_version : last_client); pwallet->WalletLogPrintf("Keys: %u plaintext, %u encrypted, %u w/ " "metadata, %u total. 
Unknown wallet records: %u\n", wss.nKeys, wss.nCKeys, wss.nKeyMeta, wss.nKeys + wss.nCKeys, wss.m_unknown_records); // nTimeFirstKey is only reliable if all keys have metadata if ((wss.nKeys + wss.nCKeys + wss.nWatchKeys) != wss.nKeyMeta) { pwallet->UpdateTimeFirstKey(1); } for (const TxId &txid : wss.vWalletUpgrade) { WriteTx(pwallet->mapWallet.at(txid)); } // Rewrite encrypted wallets of versions 0.4.0 and 0.5.0rc: if (wss.fIsEncrypted && (last_client == 40000 || last_client == 50000)) { return DBErrors::NEED_REWRITE; } if (last_client < CLIENT_VERSION) { // Update - WriteVersion(CLIENT_VERSION); + m_batch.Write(std::string("version"), CLIENT_VERSION); } if (wss.fAnyUnordered) { result = pwallet->ReorderTransactions(); } // Upgrade all of the wallet keymetadata to have the hd master key id // This operation is not atomic, but if it fails, updated entries are still // backwards compatible with older software try { pwallet->UpgradeKeyMetadata(); } catch (...) { result = DBErrors::CORRUPT; } return result; } DBErrors WalletBatch::FindWalletTx(std::vector &txIds, std::vector &vWtx) { DBErrors result = DBErrors::LOAD_OK; try { int nMinVersion = 0; if (m_batch.Read((std::string) "minversion", nMinVersion)) { if (nMinVersion > FEATURE_LATEST) { return DBErrors::TOO_NEW; } } // Get cursor Dbc *pcursor = m_batch.GetCursor(); if (!pcursor) { LogPrintf("Error getting wallet database cursor\n"); return DBErrors::CORRUPT; } while (true) { // Read next record CDataStream ssKey(SER_DISK, CLIENT_VERSION); CDataStream ssValue(SER_DISK, CLIENT_VERSION); int ret = m_batch.ReadAtCursor(pcursor, ssKey, ssValue); if (ret == DB_NOTFOUND) { break; } if (ret != 0) { LogPrintf("Error reading next record from wallet database\n"); return DBErrors::CORRUPT; } std::string strType; ssKey >> strType; if (strType == "tx") { TxId txid; ssKey >> txid; CWalletTx wtx(nullptr /* pwallet */, MakeTransactionRef()); ssValue >> wtx; txIds.push_back(txid); vWtx.push_back(wtx); } } pcursor->close(); } catch (const boost::thread_interrupted &) { throw; } catch (...) { result = DBErrors::CORRUPT; } return result; } DBErrors WalletBatch::ZapSelectTx(std::vector &txIdsIn, std::vector &txIdsOut) { // Build list of wallet TXs and hashes. std::vector txIds; std::vector vWtx; DBErrors err = FindWalletTx(txIds, vWtx); if (err != DBErrors::LOAD_OK) { return err; } std::sort(txIds.begin(), txIds.end()); std::sort(txIdsIn.begin(), txIdsIn.end()); // Erase each matching wallet TX. bool delerror = false; std::vector::iterator it = txIdsIn.begin(); for (const TxId &txid : txIds) { while (it < txIdsIn.end() && (*it) < txid) { it++; } if (it == txIdsIn.end()) { break; } if ((*it) == txid) { if (!EraseTx(txid)) { LogPrint(BCLog::DB, "Transaction was found for deletion but returned " "database error: %s\n", txid.GetHex()); delerror = true; } txIdsOut.push_back(txid); } } if (delerror) { return DBErrors::CORRUPT; } return DBErrors::LOAD_OK; } DBErrors WalletBatch::ZapWalletTx(std::vector &vWtx) { // Build list of wallet TXs. std::vector txIds; DBErrors err = FindWalletTx(txIds, vWtx); if (err != DBErrors::LOAD_OK) { return err; } // Erase each wallet TX. 
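// EraseTx() below drops the "tx" record for each collected id; a single
// failure returns DBErrors::CORRUPT so the caller treats the wallet as
// damaged rather than partially zapped.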
for (const TxId &txid : txIds) { if (!EraseTx(txid)) { return DBErrors::CORRUPT; } } return DBErrors::LOAD_OK; } void MaybeCompactWalletDB() { static std::atomic fOneThread; if (fOneThread.exchange(true)) { return; } if (!gArgs.GetBoolArg("-flushwallet", DEFAULT_FLUSHWALLET)) { return; } for (const std::shared_ptr &pwallet : GetWallets()) { WalletDatabase &dbh = pwallet->GetDBHandle(); unsigned int nUpdateCounter = dbh.nUpdateCounter; if (dbh.nLastSeen != nUpdateCounter) { dbh.nLastSeen = nUpdateCounter; dbh.nLastWalletUpdate = GetTime(); } if (dbh.nLastFlushed != nUpdateCounter && GetTime() - dbh.nLastWalletUpdate >= 2) { if (BerkeleyBatch::PeriodicFlush(dbh)) { dbh.nLastFlushed = nUpdateCounter; } } } fOneThread = false; } // // Try to (very carefully!) recover wallet file if there is a problem. // bool WalletBatch::Recover(const fs::path &wallet_path, void *callbackDataIn, bool (*recoverKVcallback)(void *callbackData, CDataStream ssKey, CDataStream ssValue), std::string &out_backup_filename) { return BerkeleyBatch::Recover(wallet_path, callbackDataIn, recoverKVcallback, out_backup_filename); } bool WalletBatch::Recover(const fs::path &wallet_path, std::string &out_backup_filename) { // recover without a key filter callback // results in recovering all record types return WalletBatch::Recover(wallet_path, nullptr, nullptr, out_backup_filename); } bool WalletBatch::RecoverKeysOnlyFilter(void *callbackData, CDataStream ssKey, CDataStream ssValue) { CWallet *dummyWallet = reinterpret_cast(callbackData); CWalletScanState dummyWss; std::string strType, strErr; bool fReadOK; { // Required in LoadKeyMetadata(): LOCK(dummyWallet->cs_wallet); fReadOK = ReadKeyValue(dummyWallet, ssKey, ssValue, dummyWss, strType, strErr); } if (!IsKeyType(strType) && strType != "hdchain") { return false; } if (!fReadOK) { LogPrintf("WARNING: WalletBatch::Recover skipping %s: %s\n", strType, strErr); return false; } return true; } bool WalletBatch::VerifyEnvironment(const fs::path &wallet_path, std::string &errorStr) { return BerkeleyBatch::VerifyEnvironment(wallet_path, errorStr); } bool WalletBatch::VerifyDatabaseFile(const fs::path &wallet_path, std::string &warningStr, std::string &errorStr) { return BerkeleyBatch::VerifyDatabaseFile(wallet_path, warningStr, errorStr, WalletBatch::Recover); } bool WalletBatch::WriteDestData(const CTxDestination &address, const std::string &key, const std::string &value) { if (!IsValidDestination(address)) { return false; } return WriteIC( std::make_pair( std::string("destdata"), std::make_pair(EncodeLegacyAddr(address, Params()), key)), value); } bool WalletBatch::EraseDestData(const CTxDestination &address, const std::string &key) { if (!IsValidDestination(address)) { return false; } return EraseIC(std::make_pair( std::string("destdata"), std::make_pair(EncodeLegacyAddr(address, Params()), key))); } bool WalletBatch::WriteHDChain(const CHDChain &chain) { return WriteIC(std::string("hdchain"), chain); } bool WalletBatch::WriteWalletFlags(const uint64_t flags) { return WriteIC(std::string("flags"), flags); } bool WalletBatch::TxnBegin() { return m_batch.TxnBegin(); } bool WalletBatch::TxnCommit() { return m_batch.TxnCommit(); } bool WalletBatch::TxnAbort() { return m_batch.TxnAbort(); } - -bool WalletBatch::ReadVersion(int &nVersion) { - return m_batch.ReadVersion(nVersion); -} - -bool WalletBatch::WriteVersion(int nVersion) { - return m_batch.WriteVersion(nVersion); -} diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 3933776e9..b67b148ca 100644 --- 
a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -1,279 +1,274 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_WALLET_WALLETDB_H #define BITCOIN_WALLET_WALLETDB_H #include #include #include