diff --git a/src/Makefile.am b/src/Makefile.am index a505f44d7e..906d097821 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -81,7 +81,8 @@ TEST_FILES = $(TESTDATA_DIR)/stellar-core_example.cfg $(TESTDATA_DIR)/stellar-co $(TESTDATA_DIR)/stellar-core_testnet.cfg $(TESTDATA_DIR)/stellar-core_testnet_legacy.cfg \ $(TESTDATA_DIR)/stellar-history.testnet.6714239.json $(TESTDATA_DIR)/stellar-history.livenet.15686975.json \ $(TESTDATA_DIR)/stellar-core_testnet_validator.cfg $(TESTDATA_DIR)/stellar-core_example_validators.cfg \ - $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.json + $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.json \ + $(TESTDATA_DIR)/stellar-history.testnet.6714239.networkPassphrase.v2.json BUILT_SOURCES = $(SRC_X_FILES:.x=.h) main/StellarCoreVersion.cpp main/XDRFilesSha256.cpp $(TEST_FILES) diff --git a/src/bucket/BucketBase.cpp b/src/bucket/BucketBase.cpp index 0917b20a82..1d0326cf47 100644 --- a/src/bucket/BucketBase.cpp +++ b/src/bucket/BucketBase.cpp @@ -385,7 +385,7 @@ BucketBase::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, } if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } std::vector shadowHashes; diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp index 38f8c3d919..a43a57ecc6 100644 --- a/src/bucket/BucketManager.cpp +++ b/src/bucket/BucketManager.cpp @@ -330,18 +330,36 @@ BucketManager::getMergeTimer() return mBucketSnapMerge; } +template <> +MergeCounters +BucketManager::readMergeCounters() +{ + std::lock_guard lock(mBucketMutex); + return mLiveMergeCounters; +} + +template <> MergeCounters -BucketManager::readMergeCounters() +BucketManager::readMergeCounters() +{ + std::lock_guard lock(mBucketMutex); + return mHotArchiveMergeCounters; +} + +template <> +void +BucketManager::incrMergeCounters(MergeCounters const& delta) { std::lock_guard lock(mBucketMutex); - return mMergeCounters; + mLiveMergeCounters += delta; } +template <> void -BucketManager::incrMergeCounters(MergeCounters const& delta) +BucketManager::incrMergeCounters(MergeCounters const& delta) { std::lock_guard lock(mBucketMutex); - mMergeCounters += delta; + mHotArchiveMergeCounters += delta; } bool @@ -623,7 +641,7 @@ BucketManager::getMergeFutureInternal(MergeKey const& key, auto future = promise.get_future().share(); promise.set_value(bucket); mc.mFinishedMergeReattachments++; - incrMergeCounters(mc); + incrMergeCounters(mc); return future; } } @@ -638,7 +656,7 @@ BucketManager::getMergeFutureInternal(MergeKey const& key, "BucketManager::getMergeFuture returning running future for merge {}", key); mc.mRunningMergeReattachments++; - incrMergeCounters(mc); + incrMergeCounters(mc); return i->second; } @@ -1013,10 +1031,10 @@ BucketManager::snapshotLedger(LedgerHeader& currentHeader) currentHeader.ledgerVersion, BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { - // TODO: Hash Archive Bucket - // Dependency: HAS supports Hot Archive BucketList - - hash = mLiveBucketList->getHash(); + SHA256 hsh; + hsh.add(mLiveBucketList->getHash()); + hsh.add(mHotArchiveBucketList->getHash()); + hash = hsh.finish(); } else { @@ -1229,51 +1247,71 @@ BucketManager::assumeState(HistoryArchiveState const& has, releaseAssert(threadIsMain()); releaseAssertOrThrow(mConfig.MODE_ENABLES_BUCKETLIST); - // TODO: Assume archival bucket state // Dependency: HAS supports Hot Archive BucketList - for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) - { - auto curr = 
getBucketByHashInternal( - hexToBin256(has.currentBuckets.at(i).curr), mSharedLiveBuckets); - auto snap = getBucketByHashInternal( - hexToBin256(has.currentBuckets.at(i).snap), mSharedLiveBuckets); - if (!(curr && snap)) - { - throw std::runtime_error("Missing bucket files while assuming " - "saved live BucketList state"); - } - auto const& nextFuture = has.currentBuckets.at(i).next; - std::shared_ptr nextBucket = nullptr; - if (nextFuture.hasOutputHash()) + auto processBucketList = [&](auto& bl, auto const& hasBuckets) { + auto kNumLevels = std::remove_reference::type::kNumLevels; + using BucketT = + typename std::remove_reference::type::bucket_type; + for (uint32_t i = 0; i < kNumLevels; ++i) { - nextBucket = getBucketByHashInternal( - hexToBin256(nextFuture.getOutputHash()), mSharedLiveBuckets); - if (!nextBucket) + auto curr = + getBucketByHash(hexToBin256(hasBuckets.at(i).curr)); + auto snap = + getBucketByHash(hexToBin256(hasBuckets.at(i).snap)); + if (!(curr && snap)) { - throw std::runtime_error( - "Missing future bucket files while " - "assuming saved live BucketList state"); + throw std::runtime_error("Missing bucket files while assuming " + "saved live BucketList state"); } - } - // Buckets on the BucketList should always be indexed - releaseAssert(curr->isEmpty() || curr->isIndexed()); - releaseAssert(snap->isEmpty() || snap->isIndexed()); - if (nextBucket) - { - releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); + auto const& nextFuture = hasBuckets.at(i).next; + std::shared_ptr nextBucket = nullptr; + if (nextFuture.hasOutputHash()) + { + nextBucket = getBucketByHash( + hexToBin256(nextFuture.getOutputHash())); + if (!nextBucket) + { + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved live BucketList state"); + } + } + + // Buckets on the BucketList should always be indexed + releaseAssert(curr->isEmpty() || curr->isIndexed()); + releaseAssert(snap->isEmpty() || snap->isIndexed()); + if (nextBucket) + { + releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); + } + + bl.getLevel(i).setCurr(curr); + bl.getLevel(i).setSnap(snap); + bl.getLevel(i).setNext(nextFuture); } + }; - mLiveBucketList->getLevel(i).setCurr(curr); - mLiveBucketList->getLevel(i).setSnap(snap); - mLiveBucketList->getLevel(i).setNext(nextFuture); + processBucketList(*mLiveBucketList, has.currentBuckets); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + processBucketList(*mHotArchiveBucketList, has.hotArchiveBuckets); } +#endif if (restartMerges) { mLiveBucketList->restartMerges(mApp, maxProtocolVersion, has.currentLedger); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + mHotArchiveBucketList->restartMerges(mApp, maxProtocolVersion, + has.currentLedger); + } +#endif } cleanupStaleFiles(has); } @@ -1580,16 +1618,35 @@ BucketManager::scheduleVerifyReferencedBucketsWork( continue; } - // TODO: Update verify to for ArchiveBucket - // Dependency: HAS supports Hot Archive BucketList - auto b = getBucketByHashInternal(h, mSharedLiveBuckets); - if (!b) - { - throw std::runtime_error(fmt::format( - FMT_STRING("Missing referenced bucket {}"), binToHex(h))); - } - seq.emplace_back(std::make_shared( - mApp, b->getFilename().string(), b->getHash(), nullptr)); + auto loadFilenameAndHash = [&]() -> std::pair { + auto live = getBucketByHashInternal(h, mSharedLiveBuckets); + if (!live) + { + auto hot = getBucketByHashInternal(h, mSharedHotArchiveBuckets); + + // Check 
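The readMergeCounters/incrMergeCounters declarations above replace the single MergeCounters member with one instance per BucketList, selected by a bucket-type template parameter and specialized in BucketManager.cpp. A minimal self-contained sketch of that pattern follows; CounterBook and its member names are placeholders for illustration, not the real BucketManager API.

#include <cstdint>
#include <mutex>

struct MergeCounters
{
    uint64_t mFinishedMergeReattachments{0};
    MergeCounters&
    operator+=(MergeCounters const& other)
    {
        mFinishedMergeReattachments += other.mFinishedMergeReattachments;
        return *this;
    }
};

struct LiveBucket;
struct HotArchiveBucket;

class CounterBook
{
    std::mutex mMutex;
    MergeCounters mLive;
    MergeCounters mHotArchive;

  public:
    template <class BucketT> MergeCounters read();
    template <class BucketT> void incr(MergeCounters const& delta);
};

// One explicit specialization per bucket kind; all of them take the same
// mutex, so bulk reads and increments stay consistent across threads.
template <>
MergeCounters
CounterBook::read<LiveBucket>()
{
    std::lock_guard<std::mutex> lock(mMutex);
    return mLive;
}

template <>
MergeCounters
CounterBook::read<HotArchiveBucket>()
{
    std::lock_guard<std::mutex> lock(mMutex);
    return mHotArchive;
}

template <>
void
CounterBook::incr<LiveBucket>(MergeCounters const& delta)
{
    std::lock_guard<std::mutex> lock(mMutex);
    mLive += delta;
}

template <>
void
CounterBook::incr<HotArchiveBucket>(MergeCounters const& delta)
{
    std::lock_guard<std::mutex> lock(mMutex);
    mHotArchive += delta;
}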
both live and hot archive buckets for hash. If we don't + // find it in either, we're missing a bucket. Note that live and + // hot archive buckets are guaranteed to have no hash collisions + // due to type field in MetaEntry. + if (!hot) + { + throw std::runtime_error( + fmt::format(FMT_STRING("Missing referenced bucket {}"), + binToHex(h))); + } + return std::make_pair(hot->getFilename().string(), + hot->getHash()); + } + else + { + return std::make_pair(live->getFilename().string(), + live->getHash()); + } + }; + + auto [filename, hash] = loadFilenameAndHash(); + seq.emplace_back( + std::make_shared(mApp, filename, hash, nullptr)); } return mApp.getWorkScheduler().scheduleWork( "verify-referenced-buckets", seq); diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index 38df819a81..d3079d8faf 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -103,7 +103,8 @@ class BucketManager : NonMovableOrCopyable medida::Counter& mLiveBucketListSizeCounter; medida::Counter& mArchiveBucketListSizeCounter; EvictionCounters mBucketListEvictionCounters; - MergeCounters mMergeCounters; + MergeCounters mLiveMergeCounters; + MergeCounters mHotArchiveMergeCounters; std::shared_ptr mEvictionStatistics{}; std::map mBucketListEntryCountCounters; @@ -203,8 +204,8 @@ class BucketManager : NonMovableOrCopyable // Reading and writing the merge counters is done in bulk, and takes a lock // briefly; this can be done from any thread. - MergeCounters readMergeCounters(); - void incrMergeCounters(MergeCounters const& delta); + template MergeCounters readMergeCounters(); + template void incrMergeCounters(MergeCounters const& delta); // Get a reference to a persistent bucket (in the BucketManager's bucket // directory), from the BucketManager's shared bucket-set. 
diff --git a/src/bucket/HotArchiveBucket.cpp b/src/bucket/HotArchiveBucket.cpp index 6ce3ed7041..0a8d53a71e 100644 --- a/src/bucket/HotArchiveBucket.cpp +++ b/src/bucket/HotArchiveBucket.cpp @@ -36,7 +36,7 @@ HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } return out.getBucket(bucketManager); diff --git a/src/bucket/HotArchiveBucket.h b/src/bucket/HotArchiveBucket.h index 772ec0c22d..02e965328f 100644 --- a/src/bucket/HotArchiveBucket.h +++ b/src/bucket/HotArchiveBucket.h @@ -25,11 +25,6 @@ typedef BucketOutputIterator HotArchiveBucketOutputIterator; class HotArchiveBucket : public BucketBase, public std::enable_shared_from_this { - static std::vector - convertToBucketEntry(std::vector const& archivedEntries, - std::vector const& restoredEntries, - std::vector const& deletedEntries); - public: // Entry type that this bucket stores using EntryT = HotArchiveBucketEntry; @@ -91,6 +86,11 @@ class HotArchiveBucket : public BucketBase, static std::shared_ptr bucketEntryToLoadResult(std::shared_ptr const& be); + static std::vector + convertToBucketEntry(std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + friend class HotArchiveBucketSnapshot; }; } \ No newline at end of file diff --git a/src/bucket/HotArchiveBucketList.h b/src/bucket/HotArchiveBucketList.h index 74a467435f..b9e187adbd 100644 --- a/src/bucket/HotArchiveBucketList.h +++ b/src/bucket/HotArchiveBucketList.h @@ -15,6 +15,8 @@ namespace stellar class HotArchiveBucketList : public BucketListBase { public: + using bucket_type = HotArchiveBucket; + void addBatch(Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, std::vector const& archiveEntries, diff --git a/src/bucket/LiveBucket.cpp b/src/bucket/LiveBucket.cpp index 7001baa9cc..eb3222f8da 100644 --- a/src/bucket/LiveBucket.cpp +++ b/src/bucket/LiveBucket.cpp @@ -387,7 +387,7 @@ LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, if (countMergeEvents) { - bucketManager.incrMergeCounters(mc); + bucketManager.incrMergeCounters(mc); } return out.getBucket(bucketManager); diff --git a/src/bucket/LiveBucketList.h b/src/bucket/LiveBucketList.h index 0f2a6ac268..6069949033 100644 --- a/src/bucket/LiveBucketList.h +++ b/src/bucket/LiveBucketList.h @@ -17,6 +17,8 @@ namespace stellar class LiveBucketList : public BucketListBase { public: + using bucket_type = LiveBucket; + // Reset Eviction Iterator position if an incoming spill or upgrade has // invalidated the previous position static void updateStartingEvictionIterator(EvictionIterator& iter, diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 0a5b545097..ed5771dd31 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -1139,9 +1139,10 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist][archival]") // Close ledgers until evicted DEADENTRYs merge with // original INITENTRYs. 
This checks that BucketList // invariants are respected - for (auto initialDeadMerges = - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead; - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < + for (auto initialDeadMerges = bm.readMergeCounters() + .mOldInitEntriesMergedWithNewDead; + bm.readMergeCounters() + .mOldInitEntriesMergedWithNewDead < initialDeadMerges + tempEntries.size(); ++ledgerSeq) { diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index 5c22b3b997..34a63a6ed8 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -27,7 +27,10 @@ #include "test/test.h" #include "util/GlobalChecks.h" #include "util/Math.h" +#include "util/ProtocolVersion.h" #include "util/Timer.h" +#include "util/UnorderedSet.h" +#include "xdr/Stellar-ledger-entries.h" #include #include @@ -329,11 +332,33 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", BucketManager& bm = app->getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotArchive = bm.getHotArchiveBucketList(); auto vers = getAppLedgerVersion(app); - + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); // Add some entries to get to a nontrivial merge-state. uint32_t ledger = 0; - uint32_t level = 3; + uint32_t level = 4; + UnorderedSet addedHotArchiveKeys; + + // To prevent duplicate merges that can interfere with counters, seed + // the starting Bucket so that each merge is unique. Otherwise, the + // first call to addBatch will merge [{first_batch}, empty_bucket]. We + // will then see other instances of [{first_batch}, empty_bucket] merges + // later on as the Bucket moves its way down the bl. By providing a + // seeded bucket, the first addBatch is a [{first_batch}, seeded_bucket] + // merge, which will not be duplicated by empty bucket merges later. The + // live BL is automatically seeded with the genesis ledger. + if (hasHotArchive) + { + auto initialHotArchiveBucket = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, addedHotArchiveKeys); + hotArchive.getLevel(0).setCurr(HotArchiveBucket::fresh( + bm, vers, {}, initialHotArchiveBucket, {}, {}, + clock.getIOContext(), /*doFsync=*/true)); + } + do { ++ledger; @@ -345,6 +370,15 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", LedgerTestUtils::generateValidLedgerEntriesWithExclusions( {CONFIG_SETTING}, 10), {}); + if (protocolVersionStartsFrom( + vers, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + addHotArchiveBatchAndUpdateSnapshot( + *app, lh, {}, {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, addedHotArchiveKeys)); + } bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); } while (!LiveBucketList::levelShouldSpill(ledger, level - 1)); @@ -354,17 +388,42 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", // eagerly) REQUIRE(bl.getLevel(level).getNext().isMerging()); + HistoryArchiveState has; + if (hasHotArchive) + { + REQUIRE(hotArchive.getLevel(level).getNext().isMerging()); + has = HistoryArchiveState(ledger, bl, hotArchive, + app->getConfig().NETWORK_PASSPHRASE); + REQUIRE(has.hasHotArchiveBuckets()); + } + else + { + has = HistoryArchiveState(ledger, bl, + app->getConfig().NETWORK_PASSPHRASE); + REQUIRE(!has.hasHotArchiveBuckets()); + } + // Serialize HAS. 
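The test above now builds its HistoryArchiveState in one of two shapes, including the Hot Archive BucketList only when the protocol supports persistent eviction. A short sketch of that selection, using the two constructor overloads exercised in this test; makeHasForTest is a hypothetical helper name, not part of the patch.

HistoryArchiveState
makeHasForTest(uint32_t ledger, LiveBucketList& bl,
               HotArchiveBucketList& hotArchive, Config const& cfg,
               bool hasHotArchive)
{
    if (hasHotArchive)
    {
        // Protocols with persistent eviction capture both BucketLists, and
        // hasHotArchiveBuckets() reports true on the result.
        return HistoryArchiveState(ledger, bl, hotArchive,
                                   cfg.NETWORK_PASSPHRASE);
    }
    // Older protocols keep the live-only form.
    return HistoryArchiveState(ledger, bl, cfg.NETWORK_PASSPHRASE);
}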
- HistoryArchiveState has(ledger, bl, - app->getConfig().NETWORK_PASSPHRASE); std::string serialHas = has.toString(); // Simulate level committing (and the FutureBucket clearing), // followed by the typical ledger-close bucket GC event. bl.getLevel(level).commit(); REQUIRE(!bl.getLevel(level).getNext().isMerging()); - auto ra = bm.readMergeCounters().mFinishedMergeReattachments; - REQUIRE(ra == 0); + if (hasHotArchive) + { + hotArchive.getLevel(level).commit(); + REQUIRE(!hotArchive.getLevel(level).getNext().isMerging()); + } + + REQUIRE( + bm.readMergeCounters().mFinishedMergeReattachments == + 0); + if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mFinishedMergeReattachments == 0); + } // Deserialize HAS. HistoryArchiveState has2; @@ -375,12 +434,29 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", *app, vers, LiveBucketList::keepTombstoneEntries(level)); REQUIRE(has2.currentBuckets[level].next.isMerging()); + if (hasHotArchive) + { + has2.hotArchiveBuckets[level].next.makeLive( + *app, vers, HotArchiveBucketList::keepTombstoneEntries(level)); + REQUIRE(has2.hotArchiveBuckets[level].next.isMerging()); + + // Resolve reattached future. + has2.hotArchiveBuckets[level].next.resolve(); + } + // Resolve reattached future. has2.currentBuckets[level].next.resolve(); - // Check that we reattached to a finished merge. - ra = bm.readMergeCounters().mFinishedMergeReattachments; - REQUIRE(ra != 0); + // Check that we reattached to one finished merge per bl. + if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mFinishedMergeReattachments == 1); + } + + REQUIRE( + bm.readMergeCounters().mFinishedMergeReattachments == + 1); }); } @@ -397,7 +473,10 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", BucketManager& bm = app->getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotArchive = bm.getHotArchiveBucketList(); auto vers = getAppLedgerVersion(app); + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); // This test is a race that will (if all goes well) eventually be won: // we keep trying to do an immediate-reattach to a running merge and @@ -420,8 +499,28 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", // testsuite with no explanation. uint32_t ledger = 0; uint32_t limit = 10000; - while (ledger < limit && - bm.readMergeCounters().mRunningMergeReattachments == 0) + + // Iterate until we've reached the limit, or stop early if both the Hot + // Archive and live BucketList have seen a running merge reattachment. 
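Both reattachment tests lean on the BucketManager behavior shown earlier in this diff: when a requested merge is already finished or still running, getMergeFuture hands back an existing shared future and bumps the corresponding reattachment counter instead of redoing work. A self-contained sketch of that bookkeeping; MergeCache, the string key, and the int payload are stand-ins for the real merge key and bucket types.

#include <cstdint>
#include <future>
#include <map>
#include <optional>
#include <string>

class MergeCache
{
    std::map<std::string, std::shared_future<int>> mRunning; // in-flight merges
    std::map<std::string, int> mFinished;                     // known outputs

  public:
    uint64_t mRunningMergeReattachments{0};
    uint64_t mFinishedMergeReattachments{0};

    // Return an existing future if this merge is already known, bumping the
    // matching reattachment counter; otherwise report "not found".
    std::optional<std::shared_future<int>>
    getMergeFuture(std::string const& key)
    {
        if (auto f = mFinished.find(key); f != mFinished.end())
        {
            std::promise<int> promise;
            auto future = promise.get_future().share();
            promise.set_value(f->second);
            ++mFinishedMergeReattachments;
            return future;
        }
        if (auto r = mRunning.find(key); r != mRunning.end())
        {
            ++mRunningMergeReattachments;
            return r->second;
        }
        return std::nullopt;
    }
};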
+ auto cond = [&]() { + bool reattachmentsNotFinished; + if (hasHotArchive) + { + reattachmentsNotFinished = + bm.readMergeCounters() + .mRunningMergeReattachments < 1 || + bm.readMergeCounters() + .mRunningMergeReattachments < 1; + } + else + { + reattachmentsNotFinished = bm.readMergeCounters() + .mRunningMergeReattachments < 1; + } + return ledger < limit && reattachmentsNotFinished; + }; + + while (cond()) { ++ledger; // Merges will start on one or more levels here, starting a race @@ -435,12 +534,30 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); + if (hasHotArchive) + { + addHotArchiveBatchAndUpdateSnapshot( + *app, lh, + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_CODE}, 100), + {}, {}); + } bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); - HistoryArchiveState has(ledger, bl, - app->getConfig().NETWORK_PASSPHRASE); + HistoryArchiveState has; + if (hasHotArchive) + { + has = HistoryArchiveState(ledger, bl, hotArchive, + app->getConfig().NETWORK_PASSPHRASE); + } + else + { + has = HistoryArchiveState(ledger, bl, + app->getConfig().NETWORK_PASSPHRASE); + } + std::string serialHas = has.toString(); // Deserialize and reactivate levels of HAS. Races with the merge @@ -460,12 +577,32 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", LiveBucketList::keepTombstoneEntries(level)); } } + + for (uint32_t level = 0; level < has2.hotArchiveBuckets.size(); + ++level) + { + if (has2.hotArchiveBuckets[level].next.hasHashes()) + { + has2.hotArchiveBuckets[level].next.makeLive( + *app, vers, + HotArchiveBucketList::keepTombstoneEntries(level)); + } + } } CLOG_INFO(Bucket, "reattached to running merge at or around ledger {}", ledger); REQUIRE(ledger < limit); - auto ra = bm.readMergeCounters().mRunningMergeReattachments; - REQUIRE(ra != 0); + + // Because there is a race, we can't guarantee that we'll see exactly 1 + // reattachment, but we should see at least 1. + if (hasHotArchive) + { + REQUIRE(bm.readMergeCounters() + .mRunningMergeReattachments >= 1); + } + + REQUIRE(bm.readMergeCounters().mRunningMergeReattachments >= + 1); }); } @@ -555,64 +692,101 @@ TEST_CASE_VERSIONS( for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { VirtualClock clock; - Application::pointer app = createTestApplication(clock, cfg); + auto app = createTestApplication(clock, cfg); auto vers = getAppLedgerVersion(app); auto& hm = app->getHistoryManager(); auto& bm = app->getBucketManager(); + auto& lm = app->getLedgerManager(); + bool hasHotArchive = protocolVersionStartsFrom( + vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); hm.setPublicationEnabled(false); app->getHistoryArchiveManager().initializeHistoryArchive( tcfg.getArchiveDirName()); + UnorderedSet hotArchiveKeys{}; + auto lastLcl = lm.getLastClosedLedgerNum(); while (hm.getPublishQueueCount() < 5) { // Do not merge this line with the next line: CLOG and // readMergeCounters each acquire a mutex, and it's possible to // deadlock with one of the worker threads if you try to hold them // both at the same time. 
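The comment above about keeping the read and the log on separate statements is the usual lock-ordering discipline: the logging call and readMergeCounters each take a mutex, and evaluating one inside the other risks holding both locks at once in an order some worker thread may take in reverse. A generic illustration of the safe shape, with stand-in Logger and Counters types rather than stellar-core classes.

#include <iostream>
#include <mutex>
#include <string>

struct Counters
{
    std::mutex mMutex;
    int mValue{0};
    int
    read()
    {
        std::lock_guard<std::mutex> lock(mMutex);
        return mValue;
    }
};

struct Logger
{
    std::mutex mMutex;
    void
    log(std::string const& line)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        std::cout << line << "\n";
    }
};

void
report(Counters& counters, Logger& logger)
{
    // Read under the counter mutex first, release it, and only then enter the
    // logger's critical section; the two locks are never held together, so no
    // ordering cycle with other threads is possible.
    int value = counters.read();
    logger.log("counter value: " + std::to_string(value));
}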
- auto ra = bm.readMergeCounters().mFinishedMergeReattachments; - CLOG_INFO(Bucket, "finished-merge reattachments while queueing: {}", - ra); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq++; - addLiveBatchAndUpdateSnapshot( - *app, lh, {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 100), - {}); + auto ra = + bm.readMergeCounters().mFinishedMergeReattachments; + auto raHotArchive = bm.readMergeCounters() + .mFinishedMergeReattachments; + CLOG_INFO(Bucket, + "finished-merge reattachments while queueing: live " + "BucketList {}, Hot Archive BucketList {}", + ra, raHotArchive); + if (lm.getLastClosedLedgerNum() != lastLcl) + { + lastLcl = lm.getLastClosedLedgerNum(); + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils:: + generateValidUniqueLedgerEntriesWithExclusions( + {CONFIG_SETTING}, 100), + {}); + if (hasHotArchive) + { + lm.setNextArchiveBatchForBucketTesting( + {}, {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, hotArchiveKeys)); + } + } + clock.crank(false); bm.forgetUnreferencedBuckets( app->getLedgerManager().getLastClosedLedgerHAS()); } + // We should have published nothing and have the first // checkpoint still queued. REQUIRE(hm.getPublishSuccessCount() == 0); REQUIRE(HistoryManager::getMinLedgerQueuedToPublish(app->getConfig()) == 7); - auto oldReattachments = - bm.readMergeCounters().mFinishedMergeReattachments; + auto oldLiveReattachments = + bm.readMergeCounters().mFinishedMergeReattachments; + auto oldHotArchiveReattachments = + bm.readMergeCounters() + .mFinishedMergeReattachments; auto HASs = HistoryManager::getPublishQueueStates(app->getConfig()); REQUIRE(HASs.size() == 5); for (auto& has : HASs) { has.prepareForPublish(*app); + REQUIRE(has.hasHotArchiveBuckets() == hasHotArchive); } - auto ra = bm.readMergeCounters().mFinishedMergeReattachments; + auto liveRa = + bm.readMergeCounters().mFinishedMergeReattachments; + auto hotArchiveRa = bm.readMergeCounters() + .mFinishedMergeReattachments; if (protocolVersionIsBefore(vers, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { // Versions prior to FIRST_PROTOCOL_SHADOWS_REMOVED re-attach to // finished merges - REQUIRE(ra > oldReattachments); + REQUIRE(liveRa > oldLiveReattachments); CLOG_INFO(Bucket, - "finished-merge reattachments after making-live: {}", ra); + "finished-merge reattachments after making-live: {}", + liveRa); + + // Sanity check: Hot archive disabled in older protocols + releaseAssert(!hasHotArchive); } else { // Versions after FIRST_PROTOCOL_SHADOWS_REMOVED do not re-attach, // because merges are cleared - REQUIRE(ra == oldReattachments); + REQUIRE(liveRa == oldLiveReattachments); + + if (hasHotArchive) + { + REQUIRE(hotArchiveRa == oldHotArchiveReattachments); + } } // Un-cork the publication process, nothing should be broken. @@ -661,10 +835,11 @@ TEST_CASE_VERSIONS( // 2048). 
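The test class that follows opens with a helper that resolves every in-flight future on a BucketList. It only touches kNumLevels, getLevel, and each level's next future, so templating it on the BucketList type lets the same code serve both the live and hot archive lists; roughly:

template <class BucketListT>
static void
resolveAllMerges(BucketListT& bl)
{
    for (uint32 i = 0; i < BucketListT::kNumLevels; ++i)
    {
        auto& level = bl.getLevel(i);
        auto& next = level.getNext();
        if (next.isMerging())
        {
            next.resolve();
        }
    }
}

This mirrors the helper introduced in the hunk below, with the template parameter written out for readability.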
class StopAndRestartBucketMergesTest { + template static void - resolveAllMerges(LiveBucketList& bl) + resolveAllMerges(BucketListT& bl) { - for (uint32 i = 0; i < LiveBucketList::kNumLevels; ++i) + for (uint32 i = 0; i < BucketListT::kNumLevels; ++i) { auto& level = bl.getLevel(i); auto& next = level.getNext(); @@ -680,226 +855,332 @@ class StopAndRestartBucketMergesTest Hash mCurrBucketHash; Hash mSnapBucketHash; Hash mBucketListHash; + Hash mHotArchiveBucketListHash; Hash mLedgerHeaderHash; - MergeCounters mMergeCounters; + MergeCounters mLiveMergeCounters; + MergeCounters mHotArchiveMergeCounters; void - dumpMergeCounters(std::string const& label, uint32_t level) const + checkEmptyHotArchiveMetrics() const { - CLOG_INFO(Bucket, "MergeCounters: {} (designated level: {})", label, - level); - CLOG_INFO(Bucket, "PreInitEntryProtocolMerges: {}", - mMergeCounters.mPreInitEntryProtocolMerges); - CLOG_INFO(Bucket, "PostInitEntryProtocolMerges: {}", - mMergeCounters.mPostInitEntryProtocolMerges); - CLOG_INFO(Bucket, "mPreShadowRemovalProtocolMerges: {}", - mMergeCounters.mPreShadowRemovalProtocolMerges); - CLOG_INFO(Bucket, "mPostShadowRemovalProtocolMerges: {}", - mMergeCounters.mPostShadowRemovalProtocolMerges); - CLOG_INFO(Bucket, "RunningMergeReattachments: {}", - mMergeCounters.mRunningMergeReattachments); - CLOG_INFO(Bucket, "FinishedMergeReattachments: {}", - mMergeCounters.mFinishedMergeReattachments); - CLOG_INFO(Bucket, "NewMetaEntries: {}", - mMergeCounters.mNewMetaEntries); - CLOG_INFO(Bucket, "NewInitEntries: {}", - mMergeCounters.mNewInitEntries); - CLOG_INFO(Bucket, "NewLiveEntries: {}", - mMergeCounters.mNewLiveEntries); - CLOG_INFO(Bucket, "NewDeadEntries: {}", - mMergeCounters.mNewDeadEntries); - CLOG_INFO(Bucket, "OldMetaEntries: {}", - mMergeCounters.mOldMetaEntries); - CLOG_INFO(Bucket, "OldInitEntries: {}", - mMergeCounters.mOldInitEntries); - CLOG_INFO(Bucket, "OldLiveEntries: {}", - mMergeCounters.mOldLiveEntries); - CLOG_INFO(Bucket, "OldDeadEntries: {}", - mMergeCounters.mOldDeadEntries); - CLOG_INFO(Bucket, "OldEntriesDefaultAccepted: {}", - mMergeCounters.mOldEntriesDefaultAccepted); - CLOG_INFO(Bucket, "NewEntriesDefaultAccepted: {}", - mMergeCounters.mNewEntriesDefaultAccepted); - CLOG_INFO(Bucket, "NewInitEntriesMergedWithOldDead: {}", - mMergeCounters.mNewInitEntriesMergedWithOldDead); - CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewLive: {}", - mMergeCounters.mOldInitEntriesMergedWithNewLive); - CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewDead: {}", - mMergeCounters.mOldInitEntriesMergedWithNewDead); - CLOG_INFO(Bucket, "NewEntriesMergedWithOldNeitherInit: {}", - mMergeCounters.mNewEntriesMergedWithOldNeitherInit); - CLOG_INFO(Bucket, "ShadowScanSteps: {}", - mMergeCounters.mShadowScanSteps); - CLOG_INFO(Bucket, "MetaEntryShadowElisions: {}", - mMergeCounters.mMetaEntryShadowElisions); - CLOG_INFO(Bucket, "LiveEntryShadowElisions: {}", - mMergeCounters.mLiveEntryShadowElisions); - CLOG_INFO(Bucket, "InitEntryShadowElisions: {}", - mMergeCounters.mInitEntryShadowElisions); - CLOG_INFO(Bucket, "DeadEntryShadowElisions: {}", - mMergeCounters.mDeadEntryShadowElisions); - CLOG_INFO(Bucket, "OutputIteratorTombstoneElisions: {}", - mMergeCounters.mOutputIteratorTombstoneElisions); - CLOG_INFO(Bucket, "OutputIteratorBufferUpdates: {}", - mMergeCounters.mOutputIteratorBufferUpdates); - CLOG_INFO(Bucket, "OutputIteratorActualWrites: {}", - mMergeCounters.mOutputIteratorActualWrites); + // If before p23, check that all hot archive metrics are zero + 
CHECK(mHotArchiveMergeCounters.mPreInitEntryProtocolMerges == 0); + CHECK(mHotArchiveMergeCounters.mPostInitEntryProtocolMerges == 0); + CHECK(mHotArchiveMergeCounters.mPreShadowRemovalProtocolMerges == + 0); + CHECK(mHotArchiveMergeCounters.mPostShadowRemovalProtocolMerges == + 0); + CHECK(mHotArchiveMergeCounters.mNewMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewDeadEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldDeadEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldEntriesDefaultAccepted == 0); + CHECK(mHotArchiveMergeCounters.mNewEntriesDefaultAccepted == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntriesMergedWithOldDead == + 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewLive == + 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewDead == + 0); + CHECK( + mHotArchiveMergeCounters.mNewEntriesMergedWithOldNeitherInit == + 0); + CHECK(mHotArchiveMergeCounters.mShadowScanSteps == 0); + CHECK(mHotArchiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mLiveEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mDeadEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates == 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorActualWrites == 0); + } + + void + dumpMergeCounters(std::string const& label, uint32_t level, + uint32_t protocol) const + { + auto dumpCounters = [&](std::string const& label, uint32_t level, + MergeCounters const& counters) { + CLOG_INFO(Bucket, "MergeCounters: {} (designated level: {})", + label, level); + CLOG_INFO(Bucket, "PreInitEntryProtocolMerges: {}", + counters.mPreInitEntryProtocolMerges); + CLOG_INFO(Bucket, "PostInitEntryProtocolMerges: {}", + counters.mPostInitEntryProtocolMerges); + CLOG_INFO(Bucket, "mPreShadowRemovalProtocolMerges: {}", + counters.mPreShadowRemovalProtocolMerges); + CLOG_INFO(Bucket, "mPostShadowRemovalProtocolMerges: {}", + counters.mPostShadowRemovalProtocolMerges); + CLOG_INFO(Bucket, "RunningMergeReattachments: {}", + counters.mRunningMergeReattachments); + CLOG_INFO(Bucket, "FinishedMergeReattachments: {}", + counters.mFinishedMergeReattachments); + CLOG_INFO(Bucket, "NewMetaEntries: {}", + counters.mNewMetaEntries); + CLOG_INFO(Bucket, "NewInitEntries: {}", + counters.mNewInitEntries); + CLOG_INFO(Bucket, "NewLiveEntries: {}", + counters.mNewLiveEntries); + CLOG_INFO(Bucket, "NewDeadEntries: {}", + counters.mNewDeadEntries); + CLOG_INFO(Bucket, "OldMetaEntries: {}", + counters.mOldMetaEntries); + CLOG_INFO(Bucket, "OldInitEntries: {}", + counters.mOldInitEntries); + CLOG_INFO(Bucket, "OldLiveEntries: {}", + counters.mOldLiveEntries); + CLOG_INFO(Bucket, "OldDeadEntries: {}", + counters.mOldDeadEntries); + CLOG_INFO(Bucket, "OldEntriesDefaultAccepted: {}", + counters.mOldEntriesDefaultAccepted); + CLOG_INFO(Bucket, "NewEntriesDefaultAccepted: {}", + counters.mNewEntriesDefaultAccepted); + CLOG_INFO(Bucket, "NewInitEntriesMergedWithOldDead: {}", + counters.mNewInitEntriesMergedWithOldDead); + CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewLive: {}", + counters.mOldInitEntriesMergedWithNewLive); + CLOG_INFO(Bucket, "OldInitEntriesMergedWithNewDead: {}", + 
counters.mOldInitEntriesMergedWithNewDead); + CLOG_INFO(Bucket, "NewEntriesMergedWithOldNeitherInit: {}", + counters.mNewEntriesMergedWithOldNeitherInit); + CLOG_INFO(Bucket, "ShadowScanSteps: {}", + counters.mShadowScanSteps); + CLOG_INFO(Bucket, "MetaEntryShadowElisions: {}", + counters.mMetaEntryShadowElisions); + CLOG_INFO(Bucket, "LiveEntryShadowElisions: {}", + counters.mLiveEntryShadowElisions); + CLOG_INFO(Bucket, "InitEntryShadowElisions: {}", + counters.mInitEntryShadowElisions); + CLOG_INFO(Bucket, "DeadEntryShadowElisions: {}", + counters.mDeadEntryShadowElisions); + CLOG_INFO(Bucket, "OutputIteratorTombstoneElisions: {}", + counters.mOutputIteratorTombstoneElisions); + CLOG_INFO(Bucket, "OutputIteratorBufferUpdates: {}", + counters.mOutputIteratorBufferUpdates); + CLOG_INFO(Bucket, "OutputIteratorActualWrites: {}", + counters.mOutputIteratorActualWrites); + }; + + dumpCounters(label + " (live)", level, mLiveMergeCounters); + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + dumpCounters(label + " (hot)", level, mHotArchiveMergeCounters); + } } void checkSensiblePostInitEntryMergeCounters(uint32_t protocol) const { - CHECK(mMergeCounters.mPostInitEntryProtocolMerges != 0); + // Check live merge counters + CHECK(mLiveMergeCounters.mPostInitEntryProtocolMerges != 0); if (protocolVersionIsBefore( protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == 0); + CHECK(mLiveMergeCounters.mPostShadowRemovalProtocolMerges == 0); } else { - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges != 0); + CHECK(mLiveMergeCounters.mPostShadowRemovalProtocolMerges != 0); } - CHECK(mMergeCounters.mNewMetaEntries == 0); - CHECK(mMergeCounters.mNewInitEntries != 0); - CHECK(mMergeCounters.mNewLiveEntries != 0); - CHECK(mMergeCounters.mNewDeadEntries != 0); + CHECK(mLiveMergeCounters.mNewMetaEntries == 0); + CHECK(mLiveMergeCounters.mNewInitEntries != 0); + CHECK(mLiveMergeCounters.mNewLiveEntries != 0); + CHECK(mLiveMergeCounters.mNewDeadEntries != 0); - CHECK(mMergeCounters.mOldMetaEntries == 0); - CHECK(mMergeCounters.mOldInitEntries != 0); - CHECK(mMergeCounters.mOldLiveEntries != 0); - CHECK(mMergeCounters.mOldDeadEntries != 0); + CHECK(mLiveMergeCounters.mOldMetaEntries == 0); + CHECK(mLiveMergeCounters.mOldInitEntries != 0); + CHECK(mLiveMergeCounters.mOldLiveEntries != 0); + CHECK(mLiveMergeCounters.mOldDeadEntries != 0); - CHECK(mMergeCounters.mOldEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead != 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive != 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead != 0); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); + CHECK(mLiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewInitEntriesMergedWithOldDead != 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewLive != 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewDead != 0); + CHECK(mLiveMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); if (protocolVersionIsBefore( protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { - CHECK(mMergeCounters.mShadowScanSteps != 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); + CHECK(mLiveMergeCounters.mShadowScanSteps != 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions != 0); } else { - 
CHECK(mMergeCounters.mShadowScanSteps == 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mShadowScanSteps == 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions == 0); } - CHECK(mMergeCounters.mMetaEntryShadowElisions == 0); - CHECK(mMergeCounters.mInitEntryShadowElisions == 0); - CHECK(mMergeCounters.mDeadEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mDeadEntryShadowElisions == 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates != 0); - CHECK(mMergeCounters.mOutputIteratorActualWrites != 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates >= - mMergeCounters.mOutputIteratorActualWrites); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates != 0); + CHECK(mLiveMergeCounters.mOutputIteratorActualWrites != 0); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates >= + mLiveMergeCounters.mOutputIteratorActualWrites); + + // Check hot archive merge counters + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + CHECK(mHotArchiveMergeCounters.mPostInitEntryProtocolMerges == + 0); + CHECK( + mHotArchiveMergeCounters.mPostShadowRemovalProtocolMerges == + 0); + + CHECK(mHotArchiveMergeCounters.mNewMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mNewDeadEntries == 0); + + CHECK(mHotArchiveMergeCounters.mOldMetaEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldInitEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldLiveEntries == 0); + CHECK(mHotArchiveMergeCounters.mOldDeadEntries == 0); + + CHECK(mHotArchiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mHotArchiveMergeCounters.mNewEntriesDefaultAccepted != 0); + CHECK( + mHotArchiveMergeCounters.mNewInitEntriesMergedWithOldDead == + 0); + CHECK( + mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewLive == + 0); + CHECK( + mHotArchiveMergeCounters.mOldInitEntriesMergedWithNewDead == + 0); + CHECK(mHotArchiveMergeCounters + .mNewEntriesMergedWithOldNeitherInit == 0); + + CHECK(mHotArchiveMergeCounters.mShadowScanSteps == 0); + CHECK(mHotArchiveMergeCounters.mLiveEntryShadowElisions == 0); + + CHECK(mHotArchiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mHotArchiveMergeCounters.mDeadEntryShadowElisions == 0); + + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates != + 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorActualWrites != + 0); + CHECK(mHotArchiveMergeCounters.mOutputIteratorBufferUpdates >= + mHotArchiveMergeCounters.mOutputIteratorActualWrites); + } + else + { + checkEmptyHotArchiveMetrics(); + } } void - checkSensiblePreInitEntryMergeCounters() const + checkSensiblePreInitEntryMergeCounters(uint32_t protocol) const { - CHECK(mMergeCounters.mPreInitEntryProtocolMerges != 0); - CHECK(mMergeCounters.mPreShadowRemovalProtocolMerges != 0); - - CHECK(mMergeCounters.mNewMetaEntries == 0); - CHECK(mMergeCounters.mNewInitEntries == 0); - CHECK(mMergeCounters.mNewLiveEntries != 0); - CHECK(mMergeCounters.mNewDeadEntries != 0); - - CHECK(mMergeCounters.mOldMetaEntries == 0); - CHECK(mMergeCounters.mOldInitEntries == 0); - CHECK(mMergeCounters.mOldLiveEntries != 0); - CHECK(mMergeCounters.mOldDeadEntries != 0); - - CHECK(mMergeCounters.mOldEntriesDefaultAccepted != 0); - 
CHECK(mMergeCounters.mNewEntriesDefaultAccepted != 0); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead == 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive == 0); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead == 0); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); - - CHECK(mMergeCounters.mShadowScanSteps != 0); - CHECK(mMergeCounters.mMetaEntryShadowElisions == 0); - CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); - CHECK(mMergeCounters.mInitEntryShadowElisions == 0); - CHECK(mMergeCounters.mDeadEntryShadowElisions != 0); - - CHECK(mMergeCounters.mOutputIteratorBufferUpdates != 0); - CHECK(mMergeCounters.mOutputIteratorActualWrites != 0); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates >= - mMergeCounters.mOutputIteratorActualWrites); + CHECK(mLiveMergeCounters.mPreInitEntryProtocolMerges != 0); + CHECK(mLiveMergeCounters.mPreShadowRemovalProtocolMerges != 0); + + CHECK(mLiveMergeCounters.mNewMetaEntries == 0); + CHECK(mLiveMergeCounters.mNewInitEntries == 0); + CHECK(mLiveMergeCounters.mNewLiveEntries != 0); + CHECK(mLiveMergeCounters.mNewDeadEntries != 0); + + CHECK(mLiveMergeCounters.mOldMetaEntries == 0); + CHECK(mLiveMergeCounters.mOldInitEntries == 0); + CHECK(mLiveMergeCounters.mOldLiveEntries != 0); + CHECK(mLiveMergeCounters.mOldDeadEntries != 0); + + CHECK(mLiveMergeCounters.mOldEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewEntriesDefaultAccepted != 0); + CHECK(mLiveMergeCounters.mNewInitEntriesMergedWithOldDead == 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewLive == 0); + CHECK(mLiveMergeCounters.mOldInitEntriesMergedWithNewDead == 0); + CHECK(mLiveMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); + + CHECK(mLiveMergeCounters.mShadowScanSteps != 0); + CHECK(mLiveMergeCounters.mMetaEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mLiveEntryShadowElisions != 0); + CHECK(mLiveMergeCounters.mInitEntryShadowElisions == 0); + CHECK(mLiveMergeCounters.mDeadEntryShadowElisions != 0); + + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates != 0); + CHECK(mLiveMergeCounters.mOutputIteratorActualWrites != 0); + CHECK(mLiveMergeCounters.mOutputIteratorBufferUpdates >= + mLiveMergeCounters.mOutputIteratorActualWrites); } void checkEqualMergeCounters(Survey const& other) const { - CHECK(mMergeCounters.mPreInitEntryProtocolMerges == - other.mMergeCounters.mPreInitEntryProtocolMerges); - CHECK(mMergeCounters.mPostInitEntryProtocolMerges == - other.mMergeCounters.mPostInitEntryProtocolMerges); - - CHECK(mMergeCounters.mPreShadowRemovalProtocolMerges == - other.mMergeCounters.mPreShadowRemovalProtocolMerges); - CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == - other.mMergeCounters.mPostShadowRemovalProtocolMerges); - - CHECK(mMergeCounters.mRunningMergeReattachments == - other.mMergeCounters.mRunningMergeReattachments); - CHECK(mMergeCounters.mFinishedMergeReattachments == - other.mMergeCounters.mFinishedMergeReattachments); - - CHECK(mMergeCounters.mNewMetaEntries == - other.mMergeCounters.mNewMetaEntries); - CHECK(mMergeCounters.mNewInitEntries == - other.mMergeCounters.mNewInitEntries); - CHECK(mMergeCounters.mNewLiveEntries == - other.mMergeCounters.mNewLiveEntries); - CHECK(mMergeCounters.mNewDeadEntries == - other.mMergeCounters.mNewDeadEntries); - CHECK(mMergeCounters.mOldMetaEntries == - other.mMergeCounters.mOldMetaEntries); - CHECK(mMergeCounters.mOldInitEntries == - other.mMergeCounters.mOldInitEntries); - CHECK(mMergeCounters.mOldLiveEntries == - 
other.mMergeCounters.mOldLiveEntries); - CHECK(mMergeCounters.mOldDeadEntries == - other.mMergeCounters.mOldDeadEntries); - - CHECK(mMergeCounters.mOldEntriesDefaultAccepted == - other.mMergeCounters.mOldEntriesDefaultAccepted); - CHECK(mMergeCounters.mNewEntriesDefaultAccepted == - other.mMergeCounters.mNewEntriesDefaultAccepted); - CHECK(mMergeCounters.mNewInitEntriesMergedWithOldDead == - other.mMergeCounters.mNewInitEntriesMergedWithOldDead); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewLive == - other.mMergeCounters.mOldInitEntriesMergedWithNewLive); - CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead == - other.mMergeCounters.mOldInitEntriesMergedWithNewDead); - CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit == - other.mMergeCounters.mNewEntriesMergedWithOldNeitherInit); - - CHECK(mMergeCounters.mShadowScanSteps == - other.mMergeCounters.mShadowScanSteps); - CHECK(mMergeCounters.mMetaEntryShadowElisions == - other.mMergeCounters.mMetaEntryShadowElisions); - CHECK(mMergeCounters.mLiveEntryShadowElisions == - other.mMergeCounters.mLiveEntryShadowElisions); - CHECK(mMergeCounters.mInitEntryShadowElisions == - other.mMergeCounters.mInitEntryShadowElisions); - CHECK(mMergeCounters.mDeadEntryShadowElisions == - other.mMergeCounters.mDeadEntryShadowElisions); - - CHECK(mMergeCounters.mOutputIteratorTombstoneElisions == - other.mMergeCounters.mOutputIteratorTombstoneElisions); - CHECK(mMergeCounters.mOutputIteratorBufferUpdates == - other.mMergeCounters.mOutputIteratorBufferUpdates); - CHECK(mMergeCounters.mOutputIteratorActualWrites == - other.mMergeCounters.mOutputIteratorActualWrites); + auto checkCountersEqual = [](auto const& counters, + auto const& other) { + CHECK(counters.mPreInitEntryProtocolMerges == + other.mPreInitEntryProtocolMerges); + CHECK(counters.mPostInitEntryProtocolMerges == + other.mPostInitEntryProtocolMerges); + + CHECK(counters.mPreShadowRemovalProtocolMerges == + other.mPreShadowRemovalProtocolMerges); + CHECK(counters.mPostShadowRemovalProtocolMerges == + other.mPostShadowRemovalProtocolMerges); + + CHECK(counters.mRunningMergeReattachments == + other.mRunningMergeReattachments); + CHECK(counters.mFinishedMergeReattachments == + other.mFinishedMergeReattachments); + + CHECK(counters.mNewMetaEntries == other.mNewMetaEntries); + CHECK(counters.mNewInitEntries == other.mNewInitEntries); + CHECK(counters.mNewLiveEntries == other.mNewLiveEntries); + CHECK(counters.mNewDeadEntries == other.mNewDeadEntries); + CHECK(counters.mOldMetaEntries == other.mOldMetaEntries); + CHECK(counters.mOldInitEntries == other.mOldInitEntries); + CHECK(counters.mOldLiveEntries == other.mOldLiveEntries); + CHECK(counters.mOldDeadEntries == other.mOldDeadEntries); + + CHECK(counters.mOldEntriesDefaultAccepted == + other.mOldEntriesDefaultAccepted); + CHECK(counters.mNewEntriesDefaultAccepted == + other.mNewEntriesDefaultAccepted); + CHECK(counters.mNewInitEntriesMergedWithOldDead == + other.mNewInitEntriesMergedWithOldDead); + CHECK(counters.mOldInitEntriesMergedWithNewLive == + other.mOldInitEntriesMergedWithNewLive); + CHECK(counters.mOldInitEntriesMergedWithNewDead == + other.mOldInitEntriesMergedWithNewDead); + CHECK(counters.mNewEntriesMergedWithOldNeitherInit == + other.mNewEntriesMergedWithOldNeitherInit); + + CHECK(counters.mShadowScanSteps == other.mShadowScanSteps); + CHECK(counters.mMetaEntryShadowElisions == + other.mMetaEntryShadowElisions); + CHECK(counters.mLiveEntryShadowElisions == + other.mLiveEntryShadowElisions); + 
CHECK(counters.mInitEntryShadowElisions == + other.mInitEntryShadowElisions); + CHECK(counters.mDeadEntryShadowElisions == + other.mDeadEntryShadowElisions); + + CHECK(counters.mOutputIteratorTombstoneElisions == + other.mOutputIteratorTombstoneElisions); + CHECK(counters.mOutputIteratorBufferUpdates == + other.mOutputIteratorBufferUpdates); + CHECK(counters.mOutputIteratorActualWrites == + other.mOutputIteratorActualWrites); + }; + + checkCountersEqual(mLiveMergeCounters, other.mLiveMergeCounters); + checkCountersEqual(mHotArchiveMergeCounters, + other.mHotArchiveMergeCounters); } + void checkEqual(Survey const& other) const { @@ -907,17 +1188,28 @@ class StopAndRestartBucketMergesTest CHECK(mSnapBucketHash == other.mSnapBucketHash); CHECK(mBucketListHash == other.mBucketListHash); CHECK(mLedgerHeaderHash == other.mLedgerHeaderHash); + CHECK(mHotArchiveBucketListHash == other.mHotArchiveBucketListHash); checkEqualMergeCounters(other); } - Survey(Application& app, uint32_t level) + Survey(Application& app, uint32_t level, uint32_t protocol) { LedgerManager& lm = app.getLedgerManager(); BucketManager& bm = app.getBucketManager(); LiveBucketList& bl = bm.getLiveBucketList(); + HotArchiveBucketList& hotBl = bm.getHotArchiveBucketList(); // Complete those merges we're about to inspect. resolveAllMerges(bl); + if (protocolVersionStartsFrom( + protocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + resolveAllMerges(hotBl); + mHotArchiveBucketListHash = hotBl.getHash(); + mHotArchiveMergeCounters = + bm.readMergeCounters(); + } - mMergeCounters = bm.readMergeCounters(); + mLiveMergeCounters = bm.readMergeCounters(); mLedgerHeaderHash = lm.getLastClosedLedgerHeader().hash; mBucketListHash = bl.getHash(); BucketLevel& blv = bl.getLevel(level); @@ -931,13 +1223,20 @@ class StopAndRestartBucketMergesTest std::set mDesignatedLedgers; std::map mControlSurveys; std::map mFinalEntries; + std::map mFinalArchiveEntries; std::vector> mInitEntryBatches; std::vector> mLiveEntryBatches; std::vector> mDeadEntryBatches; + std::vector> mArchiveEntryBatches; + + // Initial entries in Hot Archive BucketList, a "genesis leger" equivalent + // for Hot Archive + std::vector mHotArchiveInitialBatch; void collectLedgerEntries(Application& app, - std::map& entries) + std::map& liveEntries, + std::map& archiveEntries) { auto bl = app.getBucketManager().getLiveBucketList(); for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) @@ -951,12 +1250,41 @@ class StopAndRestartBucketMergesTest if (e.type() == LIVEENTRY || e.type() == INITENTRY) { auto le = e.liveEntry(); - entries[LedgerEntryKey(le)] = le; + liveEntries[LedgerEntryKey(le)] = le; } else { assert(e.type() == DEADENTRY); - entries.erase(e.deadEntry()); + liveEntries.erase(e.deadEntry()); + } + } + } + } + + if (protocolVersionStartsFrom( + getAppLedgerVersion(app), + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app.getBucketManager().getHotArchiveBucketList(); + for (uint32_t i = HotArchiveBucketList::kNumLevels; i > 0; --i) + { + BucketLevel const& level = + hotBl.getLevel(i - 1); + for (auto bucket : {level.getSnap(), level.getCurr()}) + { + for (HotArchiveBucketInputIterator bi(bucket); bi; ++bi) + { + auto const& e = *bi; + if (e.type() == HOT_ARCHIVE_LIVE) + { + archiveEntries.erase(e.key()); + } + else + { + archiveEntries[LedgerEntryKey(e.archivedEntry())] = + e.archivedEntry(); + } } } } @@ -966,23 +1294,33 @@ class StopAndRestartBucketMergesTest void 
collectFinalLedgerEntries(Application& app) { - collectLedgerEntries(app, mFinalEntries); - CLOG_INFO(Bucket, "Collected final ledger state with {} entries.", - mFinalEntries.size()); + collectLedgerEntries(app, mFinalEntries, mFinalArchiveEntries); + CLOG_INFO(Bucket, + "Collected final ledger live state with {} entries, archived " + "state with {} entries", + mFinalEntries.size(), mFinalArchiveEntries.size()); } void checkAgainstFinalLedgerEntries(Application& app) { std::map testEntries; - collectLedgerEntries(app, testEntries); - CLOG_INFO(Bucket, "Collected test ledger state with {} entries.", - testEntries.size()); + std::map testArchiveEntries; + collectLedgerEntries(app, testEntries, testArchiveEntries); + CLOG_INFO(Bucket, + "Collected test ledger state with {} live entries, {} " + "archived entries", + testEntries.size(), testArchiveEntries.size()); CHECK(testEntries.size() == mFinalEntries.size()); + CHECK(testArchiveEntries.size() == mFinalArchiveEntries.size()); for (auto const& pair : testEntries) { CHECK(mFinalEntries[pair.first] == pair.second); } + for (auto const& pair : testArchiveEntries) + { + CHECK(mFinalArchiveEntries[pair.first] == pair.second); + } } void @@ -1066,10 +1404,37 @@ class StopAndRestartBucketMergesTest "Collecting control surveys in ledger range 2..{} = {:#x}", finalLedger, finalLedger); auto app = createTestApplication(clock, cfg); + auto hasHotArchive = protocolVersionStartsFrom( + mProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); std::vector allKeys; std::map currLive; std::map currDead; + std::map currArchive; + + // To prevent duplicate merges that can interfere with counters, seed + // the starting Bucket so that each merge is unique. Otherwise, the + // first call to addBatch will merge [{first_batch}, empty_bucket]. We + // will then see other instances of [{first_batch}, empty_bucket] merges + // later on as the Bucket moves its way down the bl. By providing a + // seeded bucket, the first addBatch is a [{first_batch}, seeded_bucket] + // merge, which will not be duplicated by empty bucket merges later. The + // live BL is automatically seeded with the genesis ledger. 
+ if (hasHotArchive) + { + UnorderedSet empty; + mHotArchiveInitialBatch = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, 10, empty); + app->getBucketManager() + .getHotArchiveBucketList() + .getLevel(0) + .setCurr(HotArchiveBucket::fresh( + app->getBucketManager(), mProtocol, {}, + mHotArchiveInitialBatch, {}, {}, + app->getClock().getIOContext(), /*doFsync=*/true)); + } for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < finalLedger; ++i) @@ -1078,6 +1443,7 @@ class StopAndRestartBucketMergesTest std::vector initEntries; std::vector liveEntries; std::vector deadEntries; + std::vector archiveEntries; if (mInitEntryBatches.size() > 2) { std::set changedEntries; @@ -1143,6 +1509,22 @@ class StopAndRestartBucketMergesTest allKeys.emplace_back(k); currLive.emplace(std::make_pair(k, e)); } + auto newRandomArchive = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_CODE}, nEntriesInBatch); + for (auto const& e : newRandomArchive) + { + auto k = LedgerEntryKey(e); + auto [iter, inserted] = + currArchive.emplace(std::make_pair(k, e)); + + // only insert new entries to Archive BucketList + if (inserted) + { + archiveEntries.emplace_back(e); + } + } + mInitEntryBatches.emplace_back(initEntries); mLiveEntryBatches.emplace_back(liveEntries); mDeadEntryBatches.emplace_back(deadEntries); @@ -1150,13 +1532,20 @@ class StopAndRestartBucketMergesTest lm.setNextLedgerEntryBatchForBucketTesting( mInitEntryBatches.back(), mLiveEntryBatches.back(), mDeadEntryBatches.back()); + if (hasHotArchive) + { + mArchiveEntryBatches.emplace_back(archiveEntries); + lm.setNextArchiveBatchForBucketTesting( + mArchiveEntryBatches.back(), {}, {}); + } + closeLedger(*app); assert(i == lm.getLastClosedLedgerHeader().header.ledgerSeq); if (shouldSurveyLedger(i)) { CLOG_INFO(Bucket, "Taking survey at {} = {:#x}", i, i); - mControlSurveys.insert( - std::make_pair(i, Survey(*app, mDesignatedLevel))); + mControlSurveys.insert(std::make_pair( + i, Survey(*app, mDesignatedLevel, mProtocol))); } } @@ -1189,6 +1578,20 @@ class StopAndRestartBucketMergesTest CLOG_INFO(Bucket, "Running stop/restart test in ledger range 2..{} = {:#x}", finalLedger, finalLedger2); + + if (protocolVersionStartsFrom( + firstProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + app->getBucketManager() + .getHotArchiveBucketList() + .getLevel(0) + .setCurr(HotArchiveBucket::fresh( + app->getBucketManager(), mProtocol, {}, + mHotArchiveInitialBatch, {}, {}, + app->getClock().getIOContext(), /*doFsync=*/true)); + } + for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < finalLedger; ++i) { @@ -1197,9 +1600,21 @@ class StopAndRestartBucketMergesTest mInitEntryBatches[i - 2], mLiveEntryBatches[i - 2], mDeadEntryBatches[i - 2]); resolveAllMerges(app->getBucketManager().getLiveBucketList()); - auto countersBeforeClose = - app->getBucketManager().readMergeCounters(); + if (protocolVersionStartsFrom( + firstProtocol, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + lm.setNextArchiveBatchForBucketTesting( + mArchiveEntryBatches[i - 2], {}, {}); + resolveAllMerges( + app->getBucketManager().getHotArchiveBucketList()); + } + + auto liveCountersBeforeClose = + app->getBucketManager().readMergeCounters(); + auto archiveCountersBeforeClose = + app->getBucketManager().readMergeCounters(); if (firstProtocol != secondProtocol && i == protocolSwitchLedger) { CLOG_INFO(Bucket, @@ -1234,12 +1649,25 @@ class StopAndRestartBucketMergesTest BucketLevel& 
blv = bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); + if (protocolVersionStartsFrom( + currProtocol, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app->getBucketManager().getHotArchiveBucketList(); + BucketLevel& hotBlv = + hotBl.getLevel(mDesignatedLevel); + REQUIRE(hotBlv.getNext().isMerging()); + } } if (currProtocol == firstProtocol) { + resolveAllMerges( + app->getBucketManager().getHotArchiveBucketList()); // Check that the survey matches expectations. - Survey s(*app, mDesignatedLevel); + Survey s(*app, mDesignatedLevel, currProtocol); s.checkEqual(j->second); } @@ -1268,17 +1696,31 @@ class StopAndRestartBucketMergesTest BucketLevel& blv = bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); + if (protocolVersionStartsFrom( + currProtocol, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + HotArchiveBucketList& hotBl = + app->getBucketManager().getHotArchiveBucketList(); + BucketLevel& hotBlv = + hotBl.getLevel(mDesignatedLevel); + REQUIRE(hotBlv.getNext().isMerging()); + } } // If there are restarted merges, we need to reset the counters // to the values they had _before_ the ledger-close so the // restarted merges don't count twice. - app->getBucketManager().incrMergeCounters(countersBeforeClose); + app->getBucketManager().incrMergeCounters( + liveCountersBeforeClose); + app->getBucketManager().incrMergeCounters( + archiveCountersBeforeClose); if (currProtocol == firstProtocol) { // Re-check that the survey matches expectations. - Survey s2(*app, mDesignatedLevel); + Survey s2(*app, mDesignatedLevel, currProtocol); s2.checkEqual(j->second); } } @@ -1303,16 +1745,16 @@ class StopAndRestartBucketMergesTest LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { mControlSurveys.rbegin()->second.dumpMergeCounters( - "control, Post-INITENTRY", mDesignatedLevel); + "control, Post-INITENTRY", mDesignatedLevel, mProtocol); mControlSurveys.rbegin() ->second.checkSensiblePostInitEntryMergeCounters(mProtocol); } else { mControlSurveys.rbegin()->second.dumpMergeCounters( - "control, Pre-INITENTRY", mDesignatedLevel); + "control, Pre-INITENTRY", mDesignatedLevel, mProtocol); mControlSurveys.rbegin() - ->second.checkSensiblePreInitEntryMergeCounters(); + ->second.checkSensiblePreInitEntryMergeCounters(mProtocol); } runStopAndRestartTest(mProtocol, mProtocol); runStopAndRestartTest(mProtocol, mProtocol + 1); @@ -1328,7 +1770,13 @@ TEST_CASE("bucket persistence over app restart with initentry", 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + HotArchiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }) { for (uint32_t level : {2, 3}) { @@ -1348,7 +1796,13 @@ TEST_CASE("bucket persistence over app restart with initentry - extended", 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + HotArchiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }) { for (uint32_t level : {2, 3, 4, 5}) { diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp index c5b79d9f04..bf2d4e1304 
100644 --- a/src/bucket/test/BucketTestUtils.cpp +++ b/src/bucket/test/BucketTestUtils.cpp @@ -84,7 +84,13 @@ for_versions_with_differing_bucket_logic( 1, static_cast( LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}, + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + static_cast( + LiveBucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION) +#endif + }, cfg, f); } @@ -251,9 +257,16 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( restoredKeys.emplace_back(key); } } + mTestRestoredEntries.insert(mTestRestoredEntries.end(), + restoredKeys.begin(), + restoredKeys.end()); + mTestArchiveEntries.insert( + mTestArchiveEntries.end(), + evictedState.archivedEntries.begin(), + evictedState.archivedEntries.end()); mApp.getBucketManager().addHotArchiveBatch( - mApp, lh, evictedState.archivedEntries, restoredKeys, - {}); + mApp, lh, mTestArchiveEntries, mTestRestoredEntries, + mTestDeletedEntries); } if (ledgerCloseMeta) @@ -284,7 +297,14 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList( // Use the testing values. mApp.getBucketManager().addLiveBatch( mApp, lh, mTestInitEntries, mTestLiveEntries, mTestDeadEntries); + mUseTestEntries = false; + mTestInitEntries.clear(); + mTestLiveEntries.clear(); + mTestDeadEntries.clear(); + mTestArchiveEntries.clear(); + mTestRestoredEntries.clear(); + mTestDeletedEntries.clear(); } else { diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h index 62aa7265b5..3839344a31 100644 --- a/src/bucket/test/BucketTestUtils.h +++ b/src/bucket/test/BucketTestUtils.h @@ -66,6 +66,10 @@ class LedgerManagerForBucketTests : public LedgerManagerImpl std::vector mTestLiveEntries; std::vector mTestDeadEntries; + std::vector mTestArchiveEntries; + std::vector mTestRestoredEntries; + std::vector mTestDeletedEntries; + protected: void transferLedgerEntriesToBucketList( AbstractLedgerTxn& ltx, @@ -85,6 +89,18 @@ class LedgerManagerForBucketTests : public LedgerManagerImpl mTestDeadEntries = deadEntries; } + void + setNextArchiveBatchForBucketTesting( + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) + { + mUseTestEntries = true; + mTestArchiveEntries = archiveEntries; + mTestRestoredEntries = restoredEntries; + mTestDeletedEntries = deletedEntries; + } + LedgerManagerForBucketTests(Application& app) : LedgerManagerImpl(app) { } diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp index 30769476ca..97f447bab5 100644 --- a/src/catchup/ApplyBucketsWork.cpp +++ b/src/catchup/ApplyBucketsWork.cpp @@ -190,11 +190,15 @@ ApplyBucketsWork::doWork() { ZoneScoped; - // Step 1: index buckets. Step 2: apply buckets. Step 3: assume state + // Step 1: index live buckets. Step 2: apply buckets. Step 3: assume state if (!mIndexBucketsWork) { - // Spawn indexing work for the first time - mIndexBucketsWork = addWork(mBucketsToApply); + // Spawn indexing work for the first time. Hot Archive buckets aren't + // needed for apply (since we only store live state in SQL tables), so + // for now only index the live BL. AssumeStateWork will take care of + // index hot archive buckets later. 
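The comment above motivates why ApplyBucketsWork now only instantiates the live variant of the templated IndexBucketsWork, leaving hot-archive indexing to AssumeStateWork. A minimal, self-contained C++ sketch of that shape, using hypothetical stand-in types rather than stellar-core's real Bucket and Work classes:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins for the real bucket types.
struct LiveBucket { std::string name; };
struct HotArchiveBucket { std::string name; };

// One work type, parameterized by bucket type, as in IndexBucketsWork<BucketT>.
template <typename BucketT>
class IndexBucketsWorkSketch
{
    std::vector<std::shared_ptr<BucketT>> mBuckets;

  public:
    explicit IndexBucketsWorkSketch(std::vector<std::shared_ptr<BucketT>> b)
        : mBuckets(std::move(b))
    {
    }

    void
    run() const
    {
        for (auto const& b : mBuckets)
        {
            std::cout << "indexing " << b->name << "\n";
        }
    }
};

int main()
{
    // Apply path: only the live list is indexed up front.
    IndexBucketsWorkSketch<LiveBucket> applyPath(
        {std::make_shared<LiveBucket>(LiveBucket{"live-curr"})});
    applyPath.run();

    // Assume-state path: the hot-archive variant is instantiated as well.
    IndexBucketsWorkSketch<HotArchiveBucket> assumeStatePath(
        {std::make_shared<HotArchiveBucket>(HotArchiveBucket{"hot-curr"})});
    assumeStatePath.run();
}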
+ mIndexBucketsWork = + addWork>(mBucketsToApply); return State::WORK_RUNNING; } diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h index e2eab2c518..a5d210157c 100644 --- a/src/catchup/ApplyBucketsWork.h +++ b/src/catchup/ApplyBucketsWork.h @@ -14,7 +14,8 @@ namespace stellar class AssumeStateWork; class LiveBucketList; class Bucket; -class IndexBucketsWork; +template class IndexBucketsWork; +class LiveBucket; struct HistoryArchiveState; struct LedgerHeaderHistoryEntry; @@ -25,7 +26,7 @@ class ApplyBucketsWork : public Work bool mSpawnedAssumeStateWork{false}; std::shared_ptr mAssumeStateWork{}; - std::shared_ptr mIndexBucketsWork{}; + std::shared_ptr> mIndexBucketsWork{}; size_t mTotalBuckets{0}; size_t mAppliedBuckets{0}; size_t mAppliedEntries{0}; diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp index 9460d0fb03..2b63f8635e 100644 --- a/src/catchup/AssumeStateWork.cpp +++ b/src/catchup/AssumeStateWork.cpp @@ -26,34 +26,52 @@ AssumeStateWork::AssumeStateWork(Application& app, // Maintain reference to all Buckets in HAS to avoid garbage collection, // including future buckets that have already finished merging auto& bm = mApp.getBucketManager(); - for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) - { - auto curr = bm.getBucketByHash( - hexToBin256(mHas.currentBuckets.at(i).curr)); - auto snap = bm.getBucketByHash( - hexToBin256(mHas.currentBuckets.at(i).snap)); - if (!(curr && snap)) + auto processBuckets = [&](auto const& hasBuckets, size_t expectedLevels, + auto& workBuckets) { + releaseAssert(hasBuckets.size() == expectedLevels); + using BucketT = typename std::decay_t< + decltype(hasBuckets)>::value_type::bucket_type; + for (uint32_t i = 0; i < expectedLevels; ++i) { - throw std::runtime_error("Missing bucket files while " - "assuming saved BucketList state"); - } - - mBuckets.emplace_back(curr); - mBuckets.emplace_back(snap); - auto& nextFuture = mHas.currentBuckets.at(i).next; - if (nextFuture.hasOutputHash()) - { - auto nextBucket = bm.getBucketByHash( - hexToBin256(nextFuture.getOutputHash())); - if (!nextBucket) + auto curr = + bm.getBucketByHash(hexToBin256(hasBuckets.at(i).curr)); + auto snap = + bm.getBucketByHash(hexToBin256(hasBuckets.at(i).snap)); + if (!(curr && snap)) { - throw std::runtime_error("Missing future bucket files while " + throw std::runtime_error("Missing bucket files while " "assuming saved BucketList state"); } - mBuckets.emplace_back(nextBucket); + workBuckets.emplace_back(curr); + workBuckets.emplace_back(snap); + auto& nextFuture = hasBuckets.at(i).next; + if (nextFuture.hasOutputHash()) + { + auto nextBucket = bm.getBucketByHash( + hexToBin256(nextFuture.getOutputHash())); + if (!nextBucket) + { + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved BucketList state"); + } + + workBuckets.emplace_back(nextBucket); + } } + }; + + processBuckets(mHas.currentBuckets, LiveBucketList::kNumLevels, + mLiveBuckets); + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + if (has.hasHotArchiveBuckets()) + { + processBuckets(mHas.hotArchiveBuckets, HotArchiveBucketList::kNumLevels, + mHotArchiveBuckets); } +#endif } BasicWork::State @@ -64,19 +82,27 @@ AssumeStateWork::doWork() std::vector> seq; // Index Bucket files - seq.push_back(std::make_shared(mApp, mBuckets)); + seq.push_back( + std::make_shared>(mApp, mLiveBuckets)); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + seq.push_back(std::make_shared>( + mApp, mHotArchiveBuckets)); +#endif // Add 
bucket files to BucketList and restart merges auto assumeStateCB = [&has = mHas, maxProtocolVersion = mMaxProtocolVersion, restartMerges = mRestartMerges, - &buckets = mBuckets](Application& app) { + &liveBuckets = mLiveBuckets, + &hotArchiveBuckets = + mHotArchiveBuckets](Application& app) { app.getBucketManager().assumeState(has, maxProtocolVersion, restartMerges); // Drop bucket references once assume state complete since buckets // now referenced by BucketList - buckets.clear(); + liveBuckets.clear(); + hotArchiveBuckets.clear(); // Check invariants after state has been assumed app.getInvariantManager().checkAfterAssumeState(has.currentLedger); diff --git a/src/catchup/AssumeStateWork.h b/src/catchup/AssumeStateWork.h index 92dc4b903c..88cfcf9299 100644 --- a/src/catchup/AssumeStateWork.h +++ b/src/catchup/AssumeStateWork.h @@ -12,6 +12,7 @@ namespace stellar class Bucket; struct HistoryArchiveState; class LiveBucket; +class HotArchiveBucket; class AssumeStateWork : public Work { @@ -22,7 +23,8 @@ class AssumeStateWork : public Work // Keep strong reference to buckets in HAS so they are not garbage // collected during indexing - std::vector> mBuckets{}; + std::vector> mLiveBuckets{}; + std::vector> mHotArchiveBuckets{}; public: AssumeStateWork(Application& app, HistoryArchiveState const& has, diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp index 0902a044a9..e3acc0a94c 100644 --- a/src/catchup/CatchupWork.cpp +++ b/src/catchup/CatchupWork.cpp @@ -75,7 +75,6 @@ setHerderStateTo(FileTransferInfo const& ft, uint32_t ledger, Application& app) CatchupWork::CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, std::shared_ptr archive) : Work(app, "catchup", BasicWork::RETRY_NEVER) , mLocalState{app.getLedgerManager().getLastClosedLedgerHAS()} @@ -83,7 +82,6 @@ CatchupWork::CatchupWork(Application& app, mApp.getTmpDirManager().tmpDir(getName()))} , mCatchupConfiguration{catchupConfiguration} , mArchive{archive} - , mRetainedBuckets{bucketsToRetain} { if (mArchive) { @@ -126,7 +124,8 @@ CatchupWork::doReset() ZoneScoped; mBucketsAppliedEmitted = false; mTransactionsVerifyEmitted = false; - mBuckets.clear(); + mLiveBuckets.clear(); + mHotBuckets.clear(); mDownloadVerifyLedgersSeq.reset(); mBucketVerifyApplySeq.reset(); mTransactionsVerifyApplySeq.reset(); @@ -143,7 +142,6 @@ CatchupWork::doReset() mCurrentWork.reset(); mHAS.reset(); mBucketHAS.reset(); - mRetainedBuckets.clear(); } void @@ -216,10 +214,10 @@ CatchupWork::downloadApplyBuckets() // Download buckets, or skip if catchup is local if (!mCatchupConfiguration.localBucketsOnly()) { - std::vector hashes = - mBucketHAS->differingBuckets(mLocalState); + auto hashes = mBucketHAS->differingBuckets(mLocalState); auto getBuckets = std::make_shared( - mApp, mBuckets, hashes, *mDownloadDir, mArchive); + mApp, mLiveBuckets, mHotBuckets, hashes.live, hashes.hot, + *mDownloadDir, mArchive); seq.push_back(getBuckets); auto verifyHASCallback = [has = *mBucketHAS](Application& app) { @@ -237,7 +235,7 @@ CatchupWork::downloadApplyBuckets() } auto applyBuckets = std::make_shared( - mApp, mBuckets, *mBucketHAS, version); + mApp, mLiveBuckets, *mBucketHAS, version); seq.push_back(applyBuckets); return std::make_shared(mApp, "download-verify-apply-buckets", seq, RETRY_NEVER); @@ -497,7 +495,8 @@ CatchupWork::runCatchupStep() mVerifiedLedgerRangeStart, !mCatchupConfiguration.localBucketsOnly()); mBucketsAppliedEmitted = true; - mBuckets.clear(); + mLiveBuckets.clear(); + 
mHotBuckets.clear(); mLastApplied = mApp.getLedgerManager().getLastClosedLedgerHeader(); diff --git a/src/catchup/CatchupWork.h b/src/catchup/CatchupWork.h index 45c68a62ca..34c4330da4 100644 --- a/src/catchup/CatchupWork.h +++ b/src/catchup/CatchupWork.h @@ -47,7 +47,8 @@ class CatchupWork : public Work protected: HistoryArchiveState mLocalState; std::unique_ptr mDownloadDir; - std::map> mBuckets; + std::map> mLiveBuckets; + std::map> mHotBuckets; void doReset() override; BasicWork::State doWork() override; @@ -65,7 +66,6 @@ class CatchupWork : public Work static uint32_t const PUBLISH_QUEUE_MAX_SIZE; CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, std::shared_ptr archive = nullptr); virtual ~CatchupWork(); std::string getStatus() const override; @@ -128,6 +128,5 @@ class CatchupWork : public Work std::optional mHAS; std::optional mBucketHAS; - std::set> mRetainedBuckets; }; } diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 49b7a29fe4..fde6955eb7 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -5,6 +5,7 @@ #include "IndexBucketsWork.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" +#include "bucket/HotArchiveBucket.h" #include "bucket/LiveBucket.h" #include "util/Fs.h" #include "util/Logging.h" @@ -13,14 +14,16 @@ namespace stellar { -IndexBucketsWork::IndexWork::IndexWork(Application& app, - std::shared_ptr b) +template +IndexBucketsWork::IndexWork::IndexWork(Application& app, + std::shared_ptr b) : BasicWork(app, "index-work", BasicWork::RETRY_NEVER), mBucket(b) { } +template BasicWork::State -IndexBucketsWork::IndexWork::onRun() +IndexBucketsWork::IndexWork::onRun() { if (mState == State::WORK_WAITING) { @@ -30,20 +33,23 @@ IndexBucketsWork::IndexWork::onRun() return mState; } +template bool -IndexBucketsWork::IndexWork::onAbort() +IndexBucketsWork::IndexWork::onAbort() { return true; }; +template void -IndexBucketsWork::IndexWork::onReset() +IndexBucketsWork::IndexWork::onReset() { mState = BasicWork::State::WORK_WAITING; } +template void -IndexBucketsWork::IndexWork::postWork() +IndexBucketsWork::IndexWork::postWork() { Application& app = this->mApp; asio::io_context& ctx = app.getWorkerIOContext(); @@ -85,8 +91,7 @@ IndexBucketsWork::IndexWork::postWork() if (!self->mIndex) { - // TODO: Fix this when archive BucketLists assume state - self->mIndex = BucketIndex::createIndex( + self->mIndex = BucketIndex::createIndex( bm, self->mBucket->getFilename(), self->mBucket->getHash(), ctx); } @@ -117,14 +122,16 @@ IndexBucketsWork::IndexWork::postWork() "IndexWork: starting in background"); } -IndexBucketsWork::IndexBucketsWork( - Application& app, std::vector> const& buckets) +template +IndexBucketsWork::IndexBucketsWork( + Application& app, std::vector> const& buckets) : Work(app, "index-bucketList", BasicWork::RETRY_NEVER), mBuckets(buckets) { } +template BasicWork::State -IndexBucketsWork::doWork() +IndexBucketsWork::doWork() { if (!mWorkSpawned) { @@ -134,17 +141,19 @@ IndexBucketsWork::doWork() return checkChildrenStatus(); } +template void -IndexBucketsWork::doReset() +IndexBucketsWork::doReset() { mWorkSpawned = false; } +template void -IndexBucketsWork::spawnWork() +IndexBucketsWork::spawnWork() { UnorderedSet indexedBuckets; - auto spawnIndexWork = [&](std::shared_ptr const& b) { + auto spawnIndexWork = [&](auto const& b) { // Don't index empty bucket or buckets that are already being // indexed. 
Sometimes one level's snap bucket may be another // level's future bucket. The indexing job may have started but @@ -167,4 +176,7 @@ IndexBucketsWork::spawnWork() mWorkSpawned = true; } -} \ No newline at end of file + +template class IndexBucketsWork; +template class IndexBucketsWork; +} diff --git a/src/catchup/IndexBucketsWork.h b/src/catchup/IndexBucketsWork.h index 08415387ee..749fbf139f 100644 --- a/src/catchup/IndexBucketsWork.h +++ b/src/catchup/IndexBucketsWork.h @@ -14,20 +14,19 @@ namespace stellar class Bucket; class BucketIndex; class BucketManager; -class LiveBucket; -class IndexBucketsWork : public Work +template class IndexBucketsWork : public Work { class IndexWork : public BasicWork { - std::shared_ptr mBucket; + std::shared_ptr mBucket; std::unique_ptr mIndex; BasicWork::State mState{BasicWork::State::WORK_WAITING}; void postWork(); public: - IndexWork(Application& app, std::shared_ptr b); + IndexWork(Application& app, std::shared_ptr b); protected: State onRun() override; @@ -35,14 +34,14 @@ class IndexBucketsWork : public Work void onReset() override; }; - std::vector> const& mBuckets; + std::vector> const& mBuckets; bool mWorkSpawned{false}; void spawnWork(); public: IndexBucketsWork(Application& app, - std::vector> const& buckets); + std::vector> const& buckets); protected: State doWork() override; diff --git a/src/catchup/LedgerApplyManager.h b/src/catchup/LedgerApplyManager.h index 9a3987eee5..959cf2e07e 100644 --- a/src/catchup/LedgerApplyManager.h +++ b/src/catchup/LedgerApplyManager.h @@ -68,10 +68,8 @@ class LedgerApplyManager // LedgerManager detects it is desynchronized from SCP's consensus ledger. // This method is present in the public interface to permit testing and // offline catchups. - virtual void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + virtual void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) = 0; // Return status of catchup for or empty string, if no catchup in progress virtual std::string getStatus() const = 0; diff --git a/src/catchup/LedgerApplyManagerImpl.cpp b/src/catchup/LedgerApplyManagerImpl.cpp index b24369a64e..98d142d313 100644 --- a/src/catchup/LedgerApplyManagerImpl.cpp +++ b/src/catchup/LedgerApplyManagerImpl.cpp @@ -266,9 +266,8 @@ LedgerApplyManagerImpl::processLedger(LedgerCloseData const& ledgerData, } void -LedgerApplyManagerImpl::startCatchup( - CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) +LedgerApplyManagerImpl::startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) { ZoneScoped; releaseAssert(threadIsMain()); @@ -293,7 +292,7 @@ LedgerApplyManagerImpl::startCatchup( // NB: if WorkScheduler is aborting this returns nullptr, // which means we don't "really" start catchup. 
mCatchupWork = mApp.getWorkScheduler().scheduleWork( - configuration, bucketsToRetain, archive); + configuration, archive); } std::string @@ -445,7 +444,7 @@ LedgerApplyManagerImpl::startOnlineCatchup() auto hash = std::make_optional(lcd.getTxSet()->previousLedgerHash()); startCatchup({LedgerNumHashPair(firstBufferedLedgerSeq - 1, hash), getCatchupCount(), CatchupConfiguration::Mode::ONLINE}, - nullptr, {}); + nullptr); } void diff --git a/src/catchup/LedgerApplyManagerImpl.h b/src/catchup/LedgerApplyManagerImpl.h index 8e140d27f4..6bc4bbb6f0 100644 --- a/src/catchup/LedgerApplyManagerImpl.h +++ b/src/catchup/LedgerApplyManagerImpl.h @@ -87,10 +87,8 @@ class LedgerApplyManagerImpl : public LedgerApplyManager ProcessLedgerResult processLedger(LedgerCloseData const& ledgerData, bool isLatestSlot) override; - void startCatchup( - CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) override; std::string getStatus() const override; diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp index c8f17f20b8..dd239bbd67 100644 --- a/src/herder/test/UpgradesTests.cpp +++ b/src/herder/test/UpgradesTests.cpp @@ -2070,7 +2070,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]") std::this_thread::sleep_for(std::chrono::milliseconds(10)); bl.resolveAnyReadyFutures(); } - auto mc = bm.readMergeCounters(); + auto mc = bm.readMergeCounters(); CLOG_INFO(Bucket, "Ledger {} did {} old-protocol merges, {} new-protocol " @@ -2193,7 +2193,7 @@ TEST_CASE("upgrade to version 12", "[upgrades]") std::this_thread::sleep_for(std::chrono::milliseconds(10)); bl.resolveAnyReadyFutures(); } - auto mc = bm.readMergeCounters(); + auto mc = bm.readMergeCounters(); if (ledgerSeq < 5) { diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h index 348d47de89..bc4dfaf482 100644 --- a/src/history/FileTransferInfo.h +++ b/src/history/FileTransferInfo.h @@ -37,11 +37,13 @@ class FileTransferInfo std::string getLocalDir(TmpDir const& localRoot) const; public: - FileTransferInfo(LiveBucket const& bucket) + template + FileTransferInfo(BucketT const& bucket) : mType(FileType::HISTORY_FILE_TYPE_BUCKET) , mHexDigits(binToHex(bucket.getHash())) , mLocalPath(bucket.getFilename().string()) { + BUCKET_TYPE_ASSERT(BucketT); } FileTransferInfo(TmpDir const& snapDir, FileType const& snapType, diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp index 4afed7db0e..1cface7815 100644 --- a/src/history/HistoryArchive.cpp +++ b/src/history/HistoryArchive.cpp @@ -241,24 +241,31 @@ HistoryArchiveState::getBucketListHash() const // relatively-different representations. Everything will explode if there is // any difference in these algorithms anyways, so.. 
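A compact illustration of the hash combination introduced below in getBucketListHash, using a stand-in digest function instead of stellar-core's SHA256 class and simplified level structs: each list digests its levels' curr/snap hashes, and the hot-archive digest is folded in only when that list is present, so live-only hashes keep their legacy shape.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in digest for illustration only; the real code uses SHA-256.
static std::string
digest(std::string const& data)
{
    return std::to_string(std::hash<std::string>{}(data));
}

struct LevelHashes
{
    std::string curr;
    std::string snap;
};

// Digest one bucket list: per-level digest of curr+snap, accumulated, then
// digested once more, mirroring the hashBuckets lambda.
static std::string
hashLevels(std::vector<LevelHashes> const& levels)
{
    std::string acc;
    for (auto const& l : levels)
    {
        acc += digest(l.curr + l.snap);
    }
    return digest(acc);
}

static std::string
bucketListHash(std::vector<LevelHashes> const& live,
               std::vector<LevelHashes> const& hot)
{
    if (hot.empty())
    {
        // Live list only: pre-existing hashes are unaffected.
        return hashLevels(live);
    }
    // Combined shape: fold the two per-list digests into one.
    return digest(hashLevels(live) + hashLevels(hot));
}

int main()
{
    std::cout << bucketListHash({{"aa", "bb"}}, {}) << "\n";
    std::cout << bucketListHash({{"aa", "bb"}}, {{"cc", "dd"}}) << "\n";
}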
- SHA256 totalHash; - auto hashBuckets = [&totalHash](auto const& buckets) { + auto hashBuckets = [](auto const& buckets) { + SHA256 hash; for (auto const& level : buckets) { SHA256 levelHash; levelHash.add(hexToBin(level.curr)); levelHash.add(hexToBin(level.snap)); - totalHash.add(levelHash.finish()); + hash.add(levelHash.finish()); } + + return hash.finish(); }; - hashBuckets(currentBuckets); - hashBuckets(hotArchiveBuckets); + if (hasHotArchiveBuckets()) + { + SHA256 hash; + hash.add(hashBuckets(currentBuckets)); + hash.add(hashBuckets(hotArchiveBuckets)); + return hash.finish(); + } - return totalHash.finish(); + return hashBuckets(currentBuckets); } -std::vector +HistoryArchiveState::BucketHashReturnT HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const { ZoneScoped; @@ -266,9 +273,9 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const std::set inhibit; uint256 zero; inhibit.insert(binToHex(zero)); - std::vector ret; - auto processBuckets = [&inhibit, &ret](auto const& buckets, - auto const& otherBuckets) { + auto processBuckets = [&inhibit](auto const& buckets, + auto const& otherBuckets) { + std::vector ret; for (auto b : otherBuckets) { inhibit.insert(b.curr); @@ -302,12 +309,12 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const } } } + return ret; }; - processBuckets(currentBuckets, other.currentBuckets); - processBuckets(hotArchiveBuckets, other.hotArchiveBuckets); - - return ret; + auto liveHashes = processBuckets(currentBuckets, other.currentBuckets); + auto hotHashes = processBuckets(hotArchiveBuckets, other.hotArchiveBuckets); + return BucketHashReturnT(std::move(liveHashes), std::move(hotHashes)); } std::vector @@ -335,47 +342,48 @@ HistoryArchiveState::containsValidBuckets(Application& app) const { ZoneScoped; // This function assumes presence of required buckets to verify state - uint32_t minBucketVersion = 0; - bool nonEmptySeen = false; - auto validateBucketVersion = [&](uint32_t bucketVersion) { - if (bucketVersion < minBucketVersion) - { - CLOG_ERROR(History, - "Incompatible bucket versions: expected version " - "{} or higher, got {}", - minBucketVersion, bucketVersion); - return false; - } - minBucketVersion = bucketVersion; - return true; - }; + auto validateBucketList = [&](auto const& buckets, + uint32_t expectedLevels) { + // Get Bucket version and set nonEmptySeen + bool nonEmptySeen = false; + auto getVersionAndCheckEmpty = [&](auto const& bucket) { + int32_t version = 0; + releaseAssert(bucket); + if (!bucket->isEmpty()) + { + version = bucket->getBucketVersion(); + if (!nonEmptySeen) + { + nonEmptySeen = true; + } + } + return version; + }; - // Process bucket, return version - auto processBucket = [&](auto const& bucket) { - int32_t version = 0; - releaseAssert(bucket); - if (!bucket->isEmpty()) - { - version = bucket->getBucketVersion(); - if (!nonEmptySeen) + uint32_t minBucketVersion = 0; + auto validateBucketVersion = [&](uint32_t bucketVersion) { + if (bucketVersion < minBucketVersion) { - nonEmptySeen = true; + CLOG_ERROR(History, + "Incompatible bucket versions: expected version " + "{} or higher, got {}", + minBucketVersion, bucketVersion); + return false; } - } - return version; - }; + minBucketVersion = bucketVersion; + return true; + }; + + using BucketT = + typename std::decay_t::value_type::bucket_type; - auto validateBucketList = [&](auto const& buckets, - uint32_t expectedLevels) { if (buckets.size() != expectedLevels) { CLOG_ERROR(History, "Invalid HAS: bucket 
list size mismatch"); return false; } - using BucketT = - typename std::decay_t::value_type::bucket_type; for (uint32_t j = expectedLevels; j != 0; --j) { @@ -388,8 +396,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const hexToBin256(level.curr)); auto snap = app.getBucketManager().getBucketByHash( hexToBin256(level.snap)); - if (!validateBucketVersion(processBucket(snap)) || - !validateBucketVersion(processBucket(curr))) + if (!validateBucketVersion(getVersionAndCheckEmpty(snap)) || + !validateBucketVersion(getVersionAndCheckEmpty(curr))) { return false; } @@ -411,11 +419,13 @@ HistoryArchiveState::containsValidBuckets(Application& app) const auto const& prev = buckets[i - 1]; auto prevSnap = app.getBucketManager().getBucketByHash( hexToBin256(prev.snap)); - uint32_t prevSnapVersion = processBucket(prevSnap); + uint32_t prevSnapVersion = getVersionAndCheckEmpty(prevSnap); if (!nonEmptySeen) { - // No real buckets seen yet, move on + // We're iterating from the bottom up, so if we haven't seen a + // non-empty bucket yet, we can skip the check because the + // bucket is default initialized continue; } else if (protocolVersionStartsFrom( diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h index 10d68de337..d604c31a62 100644 --- a/src/history/HistoryArchive.h +++ b/src/history/HistoryArchive.h @@ -74,6 +74,18 @@ struct HistoryArchiveState static inline unsigned const HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22 = 2; + struct BucketHashReturnT + { + std::vector live; + std::vector hot; + + explicit BucketHashReturnT(std::vector&& live, + std::vector&& hot) + : live(live), hot(hot) + { + } + }; + unsigned version{HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22}; std::string server; std::string networkPassphrase; @@ -83,11 +95,9 @@ struct HistoryArchiveState HistoryArchiveState(); -#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets, HotArchiveBucketList const& hotBuckets, std::string const& networkPassphrase); -#endif HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets, std::string const& networkPassphrase); @@ -106,9 +116,8 @@ struct HistoryArchiveState // Return vector of buckets to fetch/apply to turn 'other' into 'this'. // Vector is sorted from largest/highest-numbered bucket to smallest/lowest, // and with snap buckets occurring before curr buckets. Zero-buckets are - // omitted. - std::vector - differingBuckets(HistoryArchiveState const& other) const; + // omitted. Hashes are distinguished by live and Hot Archive buckets. + BucketHashReturnT differingBuckets(HistoryArchiveState const& other) const; // Return vector of all buckets referenced by this state. 
std::vector allBuckets() const; diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp index 2eefc86772..92c83414b4 100644 --- a/src/history/HistoryManagerImpl.cpp +++ b/src/history/HistoryManagerImpl.cpp @@ -410,7 +410,7 @@ HistoryManagerImpl::queueCurrentHistory(uint32_t ledger) auto ledgerVers = mApp.getLedgerManager() .getLastClosedLedgerHeader() .header.ledgerVersion; - if (protocolVersionIsBefore( + if (protocolVersionStartsFrom( ledgerVers, BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp index 3ab2a9e66e..08ce68bc2c 100644 --- a/src/history/StateSnapshot.cpp +++ b/src/history/StateSnapshot.cpp @@ -120,7 +120,9 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) addIfExists(mTransactionResultSnapFile); addIfExists(mSCPHistorySnapFile); - for (auto const& hash : mLocalState.differingBuckets(other)) + auto hashes = mLocalState.differingBuckets(other); + + for (auto const& hash : hashes.live) { auto b = mApp.getBucketManager().getBucketByHash( hexToBin256(hash)); @@ -128,6 +130,16 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other) addIfExists(std::make_shared(*b)); } +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + for (auto const& hash : hashes.hot) + { + auto b = mApp.getBucketManager().getBucketByHash( + hexToBin256(hash)); + releaseAssert(b); + addIfExists(std::make_shared(*b)); + } +#endif + return files; } } diff --git a/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json new file mode 100644 index 0000000000..18fb8d3b29 --- /dev/null +++ b/src/history/serialize-tests/stellar-history.testnet.6714239.networkPassphrase.v2.json @@ -0,0 +1,184 @@ +{ + "version": 2, + "server": "v9.0.1-dirty", + "currentLedger": 6714239, + "networkPassphrase": "(V) (;,,;) (V)", + "currentBuckets": [ + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "c3131b946b5cadf713ca88d299505fe16572ffeefa083b2858a674452fd8ba76", + "next": { + "state": 1, + "output": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "snap": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905735" + }, + { + "curr": "b767206bf07e3dbbe14cff681234b7ccfd4dab5957ce6d440f692409498ff909", + "next": { + "state": 1, + "output": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905735" + }, + "snap": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d66e" + }, + { + "curr": "7a1132e7566dea51a35f6981181ad3f108256bb5f9470f0e9df3222c138c6446", + "next": { + "state": 1, + "output": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d66e" + }, + "snap": "1863067ae6d91218c589b2ccc40a983edc144196ca3a2cd43c7426275a8a3f40" + }, + { + "curr": "f4e99dd7c25206f6766911dc812502f0ec2cd5469f4742b7848523aa6e0da03e", + "next": { + "state": 1, + "output": "dd9bcfba61bf17be7093f56eb6e1392d5f25981282d4331cb51961852c11ee16" + }, + "snap": "04a5699bb688ef82e8a352b2ccfa134458c794a0365dddfac00f2e6fc7c159f9" + }, + { + "curr": "f9de28d23c53d1affe871a97a5c9747bbc9a208754388dc88cdea96852977471", + "next": { + "state": 1, + "output": "b6d012ce7af5624c24d4ff386ae172516ff0cd13f70cd030edbb503b87ad196b" + }, + "snap": 
"1fd4b80ec5278fc08269f96728206fcfbf5d3f5efe1bf7f93d4a3d79a75eeca8" + }, + { + "curr": "71f4453669ec84632afcdd1f2a97685121cef52a01db58c8d4c810310c07c0d8", + "next": { + "state": 1, + "output": "c0992883bd5f4631f736c5287538342c08e00f80be16b36a5a794772114a3db9" + }, + "snap": "b8913fa01d3b58b763fc04ee1528317c0ec71f250500758e09d0a839ca405be4" + }, + { + "curr": "a113930757a7ff48a8898dad74c1446a942b5e5b5f443626a8f943768432ec41", + "next": { + "state": 1, + "output": "9b6feec6e7e366b898a59ad562b31ce3305d7e1545f92bf5fda5c13e032bc0f9" + }, + "snap": "d3b1a36290f39d4cd09e7ef80b7cb871df9a3a5b1e40d8e5cfd26c754914ca84" + }, + { + "curr": "e57d1c6342f6e47c2ac0305cd5251bb0fb2cdd40923af87c4657e896e33acdc5", + "next": { + "state": 1, + "output": "de8805e4232fe81c04f5536487e586ab6d3ef38eff93bad5bf6872a3e53ced6b" + }, + "snap": "fcddef737957961d828023a081b84449dc0ab20524e5155837bae12a3b18ac64" + }, + { + "curr": "5c3387bcaad3139bb48ff2a99010d6f075cc9b20ba2f22c194fcda2a97926f55", + "next": { + "state": 1, + "output": "3373185b0eb537b909c56e6e16e76e33d966dc7ee1e7168123cfe1114d444e88" + }, + "snap": "2958d66f083ca13ca97a184a5be3a03b3c2e494f832b1ac1a3e16d7b02e9f50c" + }, + { + "curr": "ae7e4814b50e176d8e3532e462e2e9db02f218adebd74603d7e349cc19f489e1", + "next": { + "state": 1, + "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad51d" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "hotArchiveBuckets": [ + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "c3131b946b5cadf713ca88d299505fe16572ffeefa083b2858a674452fd8ba74", + "next": { + "state": 1, + "output": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "snap": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905732" + }, + { + "curr": "b767206bf07e3dbbe14cff681234b7ccfd4dab5957ce6d440f692409498ff901", + "next": { + "state": 1, + "output": "e08d65b07ca3cb0999a340247afcf0fedbe1d1e1df6ada0c34422e2d3b905732" + }, + "snap": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d661" + }, + { + "curr": "7a1132e7566dea51a35f6981181ad3f108256bb5f9470f0e9df3222c138c6442", + "next": { + "state": 1, + "output": "0bdeee425d0b4c3458353b7b20901e60eb8b5289dd8a714e59f910a47b49d661" + }, + "snap": "1863067ae6d91218c589b2ccc40a983edc144196ca3a2cd43c7426275a8a3f42" + }, + { + "curr": "f4e99dd7c25206f6766911dc812502f0ec2cd5469f4742b7848523aa6e0da031", + "next": { + "state": 1, + "output": "dd9bcfba61bf17be7093f56eb6e1392d5f25981282d4331cb51961852c11ee12" + }, + "snap": "04a5699bb688ef82e8a352b2ccfa134458c794a0365dddfac00f2e6fc7c159f1" + }, + { + "curr": "f9de28d23c53d1affe871a97a5c9747bbc9a208754388dc88cdea96852977472", + "next": { + "state": 1, + "output": "b6d012ce7af5624c24d4ff386ae172516ff0cd13f70cd030edbb503b87ad1961" + }, + "snap": "1fd4b80ec5278fc08269f96728206fcfbf5d3f5efe1bf7f93d4a3d79a75eeca2" + }, + { + "curr": "71f4453669ec84632afcdd1f2a97685121cef52a01db58c8d4c810310c07c0d1", + "next": { + "state": 1, + "output": "c0992883bd5f4631f736c5287538342c08e00f80be16b36a5a794772114a3db2" + }, + "snap": "b8913fa01d3b58b763fc04ee1528317c0ec71f250500758e09d0a839ca405be1" + }, + { + "curr": "a113930757a7ff48a8898dad74c1446a942b5e5b5f443626a8f943768432ec42", + "next": { + "state": 1, + "output": "9b6feec6e7e366b898a59ad562b31ce3305d7e1545f92bf5fda5c13e032bc0f1" + }, + "snap": 
"d3b1a36290f39d4cd09e7ef80b7cb871df9a3a5b1e40d8e5cfd26c754914ca24" + }, + { + "curr": "e57d1c6342f6e47c2ac0305cd5251bb0fb2cdd40923af87c4657e896e33acdc1", + "next": { + "state": 1, + "output": "de8805e4232fe81c04f5536487e586ab6d3ef38eff93bad5bf6872a3e53ced62" + }, + "snap": "fcddef737957961d828023a081b84449dc0ab20524e5155837bae12a3b18ac61" + }, + { + "curr": "5c3387bcaad3139bb48ff2a99010d6f075cc9b20ba2f22c194fcda2a97926f52", + "next": { + "state": 1, + "output": "3373185b0eb537b909c56e6e16e76e33d966dc7ee1e7168123cfe1114d444e81" + }, + "snap": "2958d66f083ca13ca97a184a5be3a03b3c2e494f832b1ac1a3e16d7b02e9f502" + }, + { + "curr": "ae7e4814b50e176d8e3532e462e2e9db02f218adebd74603d7e349cc19f489e2", + "next": { + "state": 1, + "output": "50abed8a9d86c072cfe8388246b7a378dc355fe996fd7384a5ee57e8da2ad52" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +} \ No newline at end of file diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp index 817b55560e..3c3ed5a301 100644 --- a/src/history/test/HistoryTests.cpp +++ b/src/history/test/HistoryTests.cpp @@ -172,55 +172,177 @@ TEST_CASE("History bucket verification", "[history][catchup]") auto bucketGenerator = TestBucketGenerator{ *app, app->getHistoryArchiveManager().getHistoryArchive( cg->getArchiveDirName())}; - std::vector hashes; + std::vector liveHashes; + std::vector hotHashes; auto& wm = app->getWorkScheduler(); - std::map> mBuckets; + + std::map> buckets; + std::map> hotBuckets; auto tmpDir = std::make_unique(app->getTmpDirManager().tmpDir("bucket-test")); SECTION("successful download and verify") { - hashes.push_back(bucketGenerator.generateBucket( - TestBucketState::CONTENTS_AND_HASH_OK)); - hashes.push_back(bucketGenerator.generateBucket( - TestBucketState::CONTENTS_AND_HASH_OK)); - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + SECTION("live buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + } + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + SECTION("hot archive buckets") + { + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + } + + SECTION("both live and hot archive buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::CONTENTS_AND_HASH_OK)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + } +#endif } SECTION("download fails file not found") { - hashes.push_back( - bucketGenerator.generateBucket(TestBucketState::FILE_NOT_UPLOADED)); + SECTION("live buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, 
*tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + SECTION("hot archive buckets") + { + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } + + SECTION("both live and hot archive buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::FILE_NOT_UPLOADED)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } +#endif } SECTION("download succeeds but unzip fails") { - hashes.push_back(bucketGenerator.generateBucket( - TestBucketState::CORRUPTED_ZIPPED_FILE)); + SECTION("live buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + SECTION("hot archive buckets") + { + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + SECTION("both live and hot archive buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::CORRUPTED_ZIPPED_FILE)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } +#endif } SECTION("verify fails hash mismatch") { - hashes.push_back( - bucketGenerator.generateBucket(TestBucketState::HASH_MISMATCH)); + SECTION("live buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } - auto verify = - wm.executeWork(mBuckets, hashes, *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + SECTION("hot archive buckets") + { + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_FAILURE); + } + + SECTION("both live and hot archive buckets") + { + liveHashes.push_back(bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + hotHashes.push_back( + bucketGenerator.generateBucket( + TestBucketState::HASH_MISMATCH)); + auto verify = wm.executeWork( + buckets, hotBuckets, liveHashes, hotHashes, *tmpDir); + REQUIRE(verify->getState() == 
BasicWork::State::WORK_FAILURE); + } +#endif } SECTION("no hashes to verify") { // Ensure proper behavior when no hashes are passed in - auto verify = wm.executeWork( - mBuckets, std::vector(), *tmpDir); - REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + SECTION("live buckets") + { + auto verify = wm.executeWork( + buckets, hotBuckets, std::vector(), + std::vector(), *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + } + + SECTION("hot archive buckets") + { + auto verify = wm.executeWork( + buckets, hotBuckets, std::vector(), + std::vector(), *tmpDir); + REQUIRE(verify->getState() == BasicWork::State::WORK_SUCCESS); + } } } @@ -1203,7 +1325,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works", // Check that during catchup/replay, we did not use any INITENTRY code, // were still on the old protocol. - auto mc = a->getBucketManager().readMergeCounters(); + auto mc = a->getBucketManager().readMergeCounters(); REQUIRE(mc.mPostInitEntryProtocolMerges == 0); REQUIRE(mc.mNewInitEntries == 0); REQUIRE(mc.mOldInitEntries == 0); @@ -1245,7 +1367,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works", } // Check that we did in fact use INITENTRY code. - mc = a->getBucketManager().readMergeCounters(); + mc = a->getBucketManager().readMergeCounters(); REQUIRE(mc.mPostInitEntryProtocolMerges != 0); REQUIRE(mc.mNewInitEntries != 0); REQUIRE(mc.mOldInitEntries != 0); @@ -1346,7 +1468,9 @@ TEST_CASE_VERSIONS( BucketTestUtils::for_versions_with_differing_bucket_logic( cfg, [&](Config const& cfg) { - Application::pointer app = createTestApplication(clock, cfg); + auto app = + createTestApplication( + clock, cfg); auto& hm = app->getHistoryManager(); auto& lm = app->getLedgerManager(); auto& bl = app->getBucketManager().getLiveBucketList(); @@ -1355,9 +1479,11 @@ TEST_CASE_VERSIONS( { auto lcl = lm.getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; - BucketTestUtils::addLiveBatchAndUpdateSnapshot( - *app, lcl.header, {}, - LedgerTestUtils::generateValidUniqueLedgerEntries(8), {}); + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils::generateValidLedgerEntriesWithExclusions( + {LedgerEntryType::CONFIG_SETTING}, 8), + {}); clock.crank(true); } diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp index 1632c38d0e..d9cb8f2b40 100644 --- a/src/history/test/HistoryTestsUtils.cpp +++ b/src/history/test/HistoryTestsUtils.cpp @@ -4,6 +4,9 @@ #include "history/test/HistoryTestsUtils.h" #include "bucket/BucketManager.h" +#include "bucket/BucketUtils.h" +#include "bucket/HotArchiveBucket.h" +#include "bucket/HotArchiveBucketList.h" #include "catchup/CatchupRange.h" #include "crypto/Hex.h" #include "crypto/Random.h" @@ -14,12 +17,14 @@ #include "ledger/LedgerRange.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnHeader.h" +#include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "main/ApplicationUtils.h" #include "test/TestAccount.h" #include "test/TestUtils.h" #include "test/TxTests.h" #include "test/test.h" +#include "util/GlobalChecks.h" #include "util/Math.h" #include "util/XDROperators.h" #include "work/WorkScheduler.h" @@ -32,6 +37,21 @@ namespace stellar namespace historytestutils { +namespace +{ +void +setConfigForArchival(Config& cfg) +{ + // Evict very aggressively, but only 1 entry at a time so that Hot + // Archive Buckets churn + cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; + cfg.TESTING_MINIMUM_PERSISTENT_ENTRY_LIFETIME = 10; + 
cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; + cfg.TESTING_EVICTION_SCAN_SIZE = 100'000; + cfg.TESTING_MAX_ENTRIES_TO_ARCHIVE = 1; +} +} + std::string HistoryConfigurator::getArchiveDirName() const { @@ -124,36 +144,50 @@ RealGenesisTmpDirHistoryConfigurator::configure(Config& mCfg, return mCfg; } -BucketOutputIteratorForTesting::BucketOutputIteratorForTesting( +template +BucketOutputIteratorForTesting::BucketOutputIteratorForTesting( std::string const& tmpDir, uint32_t protocolVersion, MergeCounters& mc, asio::io_context& ctx) - : BucketOutputIterator{ + : BucketOutputIterator{ tmpDir, true, testutil::testBucketMetadata(protocolVersion), mc, ctx, /*doFsync=*/true} { } +template std::pair -BucketOutputIteratorForTesting::writeTmpTestBucket() +BucketOutputIteratorForTesting::writeTmpTestBucket() { - auto ledgerEntries = - LedgerTestUtils::generateValidUniqueLedgerEntries(NUM_ITEMS_PER_BUCKET); - auto bucketEntries = - LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {}); - for (auto const& bucketEntry : bucketEntries) + auto generateEntries = [this]() { + if constexpr (std::is_same_v) + { + auto le = LedgerTestUtils::generateValidUniqueLedgerEntries( + NUM_ITEMS_PER_BUCKET); + return BucketT::convertToBucketEntry(false, {}, le, {}); + } + else + { + UnorderedSet empty; + auto keys = LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE}, NUM_ITEMS_PER_BUCKET, empty); + return BucketT::convertToBucketEntry({}, {}, keys); + } + }; + + for (auto const& bucketEntry : generateEntries()) { - put(bucketEntry); + this->put(bucketEntry); } // Finish writing and close the bucket file - REQUIRE(mBuf); - mOut.writeOne(*mBuf, &mHasher, &mBytesPut); - mObjectsPut++; - mBuf.reset(); - mOut.close(); - - return std::pair(mFilename.string(), - mHasher.finish()); + REQUIRE(this->mBuf); + this->mOut.writeOne(*this->mBuf, &this->mHasher, &this->mBytesPut); + this->mObjectsPut++; + this->mBuf.reset(); + this->mOut.close(); + + return std::pair(this->mFilename.string(), + this->mHasher.finish()); }; TestBucketGenerator::TestBucketGenerator( @@ -164,9 +198,12 @@ TestBucketGenerator::TestBucketGenerator( mApp.getTmpDirManager().tmpDir("tmp-bucket-generator")); } +template std::string TestBucketGenerator::generateBucket(TestBucketState state) { + BUCKET_TYPE_ASSERT(BucketT); + uint256 hash = HashUtils::pseudoRandomForTesting(); if (state == TestBucketState::FILE_NOT_UPLOADED) { @@ -174,7 +211,7 @@ TestBucketGenerator::generateBucket(TestBucketState state) return binToHex(hash); } MergeCounters mc; - BucketOutputIteratorForTesting bucketOut{ + BucketOutputIteratorForTesting bucketOut{ mTmpDir->getName(), mApp.getConfig().LEDGER_PROTOCOL_VERSION, mc, mApp.getClock().getIOContext()}; std::string filename; @@ -381,7 +418,11 @@ CatchupSimulation::CatchupSimulation(VirtualClock::Mode mode, bool startApp, Config::TestDbMode dbMode) : mClock(std::make_unique(mode)) , mHistoryConfigurator(cg) - , mCfg(getTestConfig(0, dbMode)) + , mCfg([&] { + auto cfg = getTestConfig(0, dbMode); + setConfigForArchival(cfg); + return cfg; + }()) , mAppPtr(createTestApplication(*mClock, mHistoryConfigurator->configure(mCfg, true), /*newDB*/ true, /*startApp*/ false)) @@ -493,10 +534,13 @@ CatchupSimulation::generateRandomLedger(uint32_t version) 10; res.writeBytes = 100'000; uint32_t inclusion = 100; + sorobanTxs.push_back(createUploadWasmTx( - getApp(), stroopy, inclusion, DEFAULT_TEST_RESOURCE_FEE, res)); + getApp(), stroopy, inclusion, DEFAULT_TEST_RESOURCE_FEE * 5, + res, {}, 0, rand_uniform(101, 2'000))); 
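The templated BucketOutputIteratorForTesting earlier in this hunk selects its test entries with if constexpr on the bucket type: live ledger entries for LiveBucket, archived keys for HotArchiveBucket. A small stand-alone sketch of that dispatch pattern, with placeholder types and plain strings instead of real XDR:

#include <cstddef>
#include <string>
#include <type_traits>
#include <vector>

// Placeholder bucket types; the real code uses stellar-core's classes.
struct LiveBucket {};
struct HotArchiveBucket {};

// One templated helper generates different test data per bucket type,
// avoiding two separate overloads.
template <typename BucketT>
std::vector<std::string>
generateTestEntries(std::size_t n)
{
    std::vector<std::string> out;
    for (std::size_t i = 0; i < n; ++i)
    {
        if constexpr (std::is_same_v<BucketT, LiveBucket>)
        {
            out.push_back("live-entry-" + std::to_string(i));
        }
        else
        {
            out.push_back("archived-key-" + std::to_string(i));
        }
    }
    return out;
}

int main()
{
    auto live = generateTestEntries<LiveBucket>(5);
    auto archived = generateTestEntries<HotArchiveBucket>(5);
    return (live.size() + archived.size() == 10) ? 0 : 1;
}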
sorobanTxs.push_back(createUploadWasmTx( - getApp(), eve, inclusion * 5, DEFAULT_TEST_RESOURCE_FEE, res)); + getApp(), eve, inclusion * 5, DEFAULT_TEST_RESOURCE_FEE * 5, + res, {}, 0, rand_uniform(101, 2'000))); check = true; } } @@ -617,6 +661,19 @@ CatchupSimulation::ensureLedgerAvailable(uint32_t targetLedger, getApp().getBucketManager().getLiveBucketList(); } } + + // Make sure the Hot Archive isn't empty + if (protocolVersionStartsFrom( + getApp() + .getLedgerManager() + .getLastClosedLedgerHeader() + .header.ledgerVersion, + BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + releaseAssert( + getApp().getBucketManager().getHotArchiveBucketList().getSize() >= + 1'000); + } } void @@ -740,10 +797,12 @@ CatchupSimulation::createCatchupApplication( mCfgs.back().CATCHUP_COMPLETE = count == std::numeric_limits::max(); mCfgs.back().CATCHUP_RECENT = count; + setConfigForArchival(mCfgs.back()); if (ledgerVersion) { mCfgs.back().TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = *ledgerVersion; } + mSpawnedAppsClocks.emplace_front(); auto newApp = createTestApplication( mSpawnedAppsClocks.front(), @@ -765,7 +824,7 @@ CatchupSimulation::catchupOffline(Application::pointer app, uint32_t toLedger, : CatchupConfiguration::Mode::OFFLINE_BASIC; auto catchupConfiguration = CatchupConfiguration{toLedger, app->getConfig().CATCHUP_RECENT, mode}; - lm.startCatchup(catchupConfiguration, nullptr, {}); + lm.startCatchup(catchupConfiguration, nullptr); REQUIRE(!app->getClock().getIOContext().stopped()); auto& lam = app->getLedgerApplyManager(); @@ -1119,5 +1178,10 @@ CatchupSimulation::restartApp() mClock = std::make_unique(mClock->getMode()); mAppPtr = createTestApplication(*mClock, mCfg, /*newDB*/ false); } + +template std::string + TestBucketGenerator::generateBucket(TestBucketState); +template std::string + TestBucketGenerator::generateBucket(TestBucketState); } } diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h index 29a5564cc7..cfa2f4692d 100644 --- a/src/history/test/HistoryTestsUtils.h +++ b/src/history/test/HistoryTestsUtils.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/BucketUtils.h" #include "bucket/HotArchiveBucketList.h" #include "bucket/LiveBucketList.h" #include "catchup/VerifyLedgerChainWork.h" @@ -47,7 +48,7 @@ enum class TestBucketState class HistoryConfigurator; class TestBucketGenerator; -class BucketOutputIteratorForTesting; +template class BucketOutputIteratorForTesting; struct CatchupPerformedWork; class HistoryConfigurator : NonCopyable @@ -99,8 +100,11 @@ class RealGenesisTmpDirHistoryConfigurator : public TmpDirHistoryConfigurator Config& configure(Config& cfg, bool writable) const override; }; -class BucketOutputIteratorForTesting : public LiveBucketOutputIterator +template +class BucketOutputIteratorForTesting : public BucketOutputIterator { + BUCKET_TYPE_ASSERT(BucketT); + const size_t NUM_ITEMS_PER_BUCKET = 5; public: @@ -121,6 +125,7 @@ class TestBucketGenerator TestBucketGenerator(Application& app, std::shared_ptr archive); + template std::string generateBucket( TestBucketState desiredState = TestBucketState::CONTENTS_AND_HASH_OK); }; diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp index a4b0d01106..0c185e3b8e 100644 --- a/src/historywork/DownloadBucketsWork.cpp +++ b/src/historywork/DownloadBucketsWork.cpp @@ -18,13 +18,17 @@ namespace stellar DownloadBucketsWork::DownloadBucketsWork( Application& app, - std::map>& buckets, - std::vector hashes, TmpDir const& downloadDir, - std::shared_ptr archive) + std::map>& liveBuckets, + std::map>& hotBuckets, + std::vector liveHashes, std::vector hotHashes, + TmpDir const& downloadDir, std::shared_ptr archive) : BatchWork{app, "download-verify-buckets"} - , mBuckets{buckets} - , mHashes{hashes} - , mNextBucketIter{mHashes.begin()} + , mLiveBuckets{liveBuckets} + , mHotBuckets{hotBuckets} + , mLiveHashes{liveHashes} + , mHotHashes{hotHashes} + , mNextLiveBucketIter{mLiveHashes.begin()} + , mNextHotBucketIter{mHotHashes.begin()} , mDownloadDir{downloadDir} , mArchive{archive} { @@ -35,11 +39,14 @@ DownloadBucketsWork::getStatus() const { if (!isDone() && !isAborting()) { - if (!mHashes.empty()) + if (!mLiveHashes.empty()) { - auto numStarted = std::distance(mHashes.begin(), mNextBucketIter); + auto numStarted = + std::distance(mLiveHashes.begin(), mNextLiveBucketIter) + + std::distance(mHotHashes.begin(), mNextHotBucketIter); auto numDone = numStarted - getNumWorksInBatch(); - auto total = static_cast(mHashes.size()); + auto total = + static_cast(mLiveHashes.size() + mHotHashes.size()); auto pct = (100 * numDone) / total; return fmt::format( FMT_STRING( @@ -53,13 +60,15 @@ DownloadBucketsWork::getStatus() const bool DownloadBucketsWork::hasNext() const { - return mNextBucketIter != mHashes.end(); + return mNextLiveBucketIter != mLiveHashes.end() || + mNextHotBucketIter != mHotHashes.end(); } void DownloadBucketsWork::resetIter() { - mNextBucketIter = mHashes.begin(); + mNextLiveBucketIter = mLiveHashes.begin(); + mNextHotBucketIter = mHotHashes.begin(); } std::shared_ptr @@ -71,7 +80,10 @@ DownloadBucketsWork::yieldMoreWork() throw std::runtime_error("Nothing to iterate over!"); } - auto hash = *mNextBucketIter; + // Iterate through live hashes then Hot Archive hashes + auto isHotHash = mNextLiveBucketIter == mLiveHashes.end(); + auto hash = isHotHash ? 
*mNextHotBucketIter : *mNextLiveBucketIter; + FileTransferInfo ft(mDownloadDir, FileType::HISTORY_FILE_TYPE_BUCKET, hash); auto w1 = std::make_shared(mApp, ft, mArchive); @@ -90,16 +102,29 @@ DownloadBucketsWork::yieldMoreWork() }; std::weak_ptr weak( std::static_pointer_cast(shared_from_this())); - auto successCb = [weak, ft, hash](Application& app) -> bool { + auto successCb = [weak, ft, hash, isHotHash](Application& app) -> bool { auto self = weak.lock(); if (self) { auto bucketPath = ft.localPath_nogz(); - auto b = app.getBucketManager().adoptFileAsBucket( - bucketPath, hexToBin256(hash), - /*mergeKey=*/nullptr, - /*index=*/nullptr); - self->mBuckets[hash] = b; + + if (isHotHash) + { + auto b = + app.getBucketManager().adoptFileAsBucket( + bucketPath, hexToBin256(hash), + /*mergeKey=*/nullptr, + /*index=*/nullptr); + self->mHotBuckets[hash] = b; + } + else + { + auto b = app.getBucketManager().adoptFileAsBucket( + bucketPath, hexToBin256(hash), + /*mergeKey=*/nullptr, + /*index=*/nullptr); + self->mLiveBuckets[hash] = b; + } } return true; }; @@ -111,7 +136,14 @@ DownloadBucketsWork::yieldMoreWork() auto w4 = std::make_shared( mApp, "download-verify-sequence-" + hash, seq); - ++mNextBucketIter; + if (isHotHash) + { + ++mNextHotBucketIter; + } + else + { + ++mNextLiveBucketIter; + } return w4; } } diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h index 573f5d8a82..83c5c736cb 100644 --- a/src/historywork/DownloadBucketsWork.h +++ b/src/historywork/DownloadBucketsWork.h @@ -17,17 +17,23 @@ class HistoryArchive; class DownloadBucketsWork : public BatchWork { - std::map>& mBuckets; - std::vector mHashes; - std::vector::const_iterator mNextBucketIter; + std::map>& mLiveBuckets; + std::map>& mHotBuckets; + std::vector mLiveHashes; + std::vector mHotHashes; + std::vector::const_iterator mNextLiveBucketIter; + std::vector::const_iterator mNextHotBucketIter; TmpDir const& mDownloadDir; std::shared_ptr mArchive; public: + // Note: hashes must contain both live and hot archive bucket hashes DownloadBucketsWork( Application& app, - std::map>& buckets, - std::vector hashes, TmpDir const& downloadDir, + std::map>& liveBuckets, + std::map>& hotBuckets, + std::vector liveHashes, std::vector hotHashes, + TmpDir const& downloadDir, std::shared_ptr archive = nullptr); ~DownloadBucketsWork() = default; std::string getStatus() const override; diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index e10f11ab3d..b5754e470f 100644 --- a/src/ledger/LedgerManager.h +++ b/src/ledger/LedgerManager.h @@ -170,10 +170,8 @@ class LedgerManager // LedgerManager detects it is desynchronized from SCP's consensus ledger. // This method is present in the public interface to permit testing and // offline catchups. - virtual void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + virtual void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) = 0; // Forcibly close the current ledger, applying `ledgerData` as the consensus // changes. 
This is normally done automatically as part of diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index b1aeda1160..e9166234d1 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -656,14 +656,12 @@ LedgerManagerImpl::valueExternalized(LedgerCloseData const& ledgerData, } void -LedgerManagerImpl::startCatchup( - CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) +LedgerManagerImpl::startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) { ZoneScoped; setState(LM_CATCHING_UP_STATE); - mApp.getLedgerApplyManager().startCatchup(configuration, archive, - bucketsToRetain); + mApp.getLedgerApplyManager().startCatchup(configuration, archive); } uint64_t diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index 3538021a04..8afcfffc8a 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -224,10 +224,8 @@ class LedgerManagerImpl : public LedgerManager Database& getDatabase() override; - void startCatchup( - CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup(CatchupConfiguration configuration, + std::shared_ptr archive) override; void closeLedger(LedgerCloseData const& ledgerData, bool calledViaExternalize) override; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 2e959d636e..4a893beb5e 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -892,7 +892,7 @@ catchup(Application::pointer app, CatchupConfiguration cc, try { - app->getLedgerManager().startCatchup(cc, archive, {}); + app->getLedgerManager().startCatchup(cc, archive); } catch (std::invalid_argument const&) { diff --git a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json index c3c55557a0..4e995116f6 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "d4ac85b1db60e58b070999d97cc3e60881db5da799ff2a71e87c2a6db3978ad4", + "hash": "c4e89b27d71a10046b3993b1692d3941ed0139b190cfeaf42fc7ca1575de2726", "header": { "ledgerVersion": 23, - "previousLedgerHash": "114439ca9d61a89a909b9a5d2aa726e9be3ecd4a5b0f0cab36de604a5f643cc6", + "previousLedgerHash": "cd0e8831e112ba4d7ba52b8a295f2f5d9c922a4f32408a0bd902bf31988aa06c", "scpValue": { - "txSetHash": "31a57ddc07402ae0623b06b041a6a4196a19caac487491e0a5205c8a4f76e732", + "txSetHash": "1f7a881c1be1201af678467343ba1a2801c15183e4140c4b16ff6277cc7c26a2", "closeTime": 1451692800, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "ba83d21fab1c12875efbe5fe7bd9882247a5ca115a81896834d0a161df85c5258b1950831c3dc9da3241deee47b94594b21d8ef315b2f060dc66917f9fd1e609" + "signature": "5869d4582ee630594be8e7f1a2bbd20192dbaf635f945872d7fa02fb8b9d33e73d387860a12a628e3c98238e03551e15951b9e4990482c236a22c8c836eef50a" } } }, "txSetResultHash": "65b6fe91abfe43ed98fa2163f08fdf3f2f3231101bba05102521186c25a1cc4b", - "bucketListHash": "685227c142ea174d494b5efe8dd9c01aee683dddcbd75076b5acc8729a56e883", + "bucketListHash": "2c092b2b7db88c611a3a10b308ac925fb82eb1331204aaee84e247485b486e3b", "ledgerSeq": 28, "totalCoins": 1000000000000000000, "feePool": 804520, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - 
"previousLedgerHash": "114439ca9d61a89a909b9a5d2aa726e9be3ecd4a5b0f0cab36de604a5f643cc6", + "previousLedgerHash": "cd0e8831e112ba4d7ba52b8a295f2f5d9c922a4f32408a0bd902bf31988aa06c", "phases": [ { "v": 0, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23.json b/src/testdata/ledger-close-meta-v1-protocol-23.json index 5e8ede27a8..6fe4d2791f 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "ede8d13efde47058a47388532e2a6ce544f6744423a268ecf513aff86916283a", + "hash": "1a1722f149f5348813f73ff4c0cb45245224d43ebcb69b44e8290e7697b90793", "header": { "ledgerVersion": 23, - "previousLedgerHash": "87fdf0a3595bf4021274bb88fac521cf02060dd961b9fda38879b287a5418cb6", + "previousLedgerHash": "5263ba08cd1c7ea229e165999b8aaf27c9c99a2f25bed68deedf9fd729fb614b", "scpValue": { - "txSetHash": "df738c70c1acc6753d41ceea35716fd95888db1f36558af535bb2b8a78c856e3", + "txSetHash": "60b3ac298285f6dbe97be76298af5c11d10c1be4df942e719c12ce8b27213fc9", "closeTime": 0, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "95c4d776f015ad5e69167c6e47be677311b30efffab6617b7dd8f94de86b2f219011cd9bb908ec08d39227594fa4b2580cb73f65fa583ac06f03d0e7dae5dd08" + "signature": "2f16c8287011bb137d7f3d10758a418c1d7f25ce963d5fea61d4b57de6a9455653d890c4eb236273208daf8700b3bb5d8c7e76dc58f8cf0190b3e132f409030a" } } }, - "txSetResultHash": "f66233c106977a4cc148e019411ff6ddfaf76c337d004ed9a304a70407b161d0", - "bucketListHash": "bf090cb59a5f1fd97d083af9e132128ba69a84998e804d6f02fc34e82c9e4b9e", + "txSetResultHash": "249b974bacf8b5c4a8f0b5598194c1b9eca64af0b5c1506daa871c1533b6baac", + "bucketListHash": "f54692ac02d6c9d4715dc33ee72dd50277c43957c60106f4773e926a572cb20e", "ledgerSeq": 7, "totalCoins": 1000000000000000000, "feePool": 800, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "87fdf0a3595bf4021274bb88fac521cf02060dd961b9fda38879b287a5418cb6", + "previousLedgerHash": "5263ba08cd1c7ea229e165999b8aaf27c9c99a2f25bed68deedf9fd729fb614b", "phases": [ { "v": 0, @@ -188,43 +188,22 @@ "txProcessing": [ { "result": { - "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", + "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", "result": { - "feeCharged": 300, + "feeCharged": 100, "result": { - "code": "txFEE_BUMP_INNER_SUCCESS", - "innerResultPair": { - "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", - "result": { - "feeCharged": 200, - "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - }, - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - } - ] - }, - "ext": { - "v": 0 + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } } } - } + ] }, "ext": { "v": 0 @@ -235,13 +214,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 4, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 400000000, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + 
"balance": 999999998999989700, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -249,7 +228,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -265,9 +268,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -275,7 +278,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -299,61 +326,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 7, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 5, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836480, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -361,7 +336,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -377,9 +376,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836481, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -429,18 +428,43 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 0, - 
"limit": 100, - "flags": 1, + "balance": 399999900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -454,18 +478,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "balance": 400000900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -473,28 +522,49 @@ "v": 0 } } - } - ] - }, - { - "changes": [ + }, { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -508,18 +578,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 100, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999988600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -538,22 +633,43 @@ }, { "result": { - "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", + "transactionHash": 
"324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", "result": { - "feeCharged": 100, + "feeCharged": 300, "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } + "code": "txFEE_BUMP_INNER_SUCCESS", + "innerResultPair": { + "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", + "result": { + "feeCharged": 200, + "result": { + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + }, + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + } + ] + }, + "ext": { + "v": 0 } } - ] + } }, "ext": { "v": 0 @@ -564,13 +680,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 5, + "lastModifiedLedgerSeq": 4, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989700, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 400000000, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -578,31 +694,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -618,9 +710,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -628,57 +720,85 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + } + ], + "txApplyProcessing": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "txChangesBefore": [ + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], + "ext": { + "v": 0 } } + }, + "ext": { + "v": 0 } } }, - "ext": { - "v": 0 - } - } - } - ], - "txApplyProcessing": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "txChangesBefore": [ { "type": "LEDGER_ENTRY_STATE", 
"state": { - "lastModifiedLedgerSeq": 7, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836480, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -686,31 +806,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -726,9 +822,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836481, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -778,43 +874,18 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399999900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 0, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -828,43 +899,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 400000900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -872,49 +918,28 @@ "v": 0 } } - }, + } + ] + }, + { + "changes": [ { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + 
"balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -928,43 +953,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999988600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 100, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } },