From 36d75a9a13b5d46ce3f864affd4ae4cad9a35357 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 27 May 2024 18:29:10 +0200 Subject: [PATCH 1/2] Port paged-nmap changes Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 17 +- Cargo.toml | 1 + substrate/frame/paged-list/fuzzer/Cargo.toml | 2 +- .../frame/paged-list/fuzzer/src/paged_list.rs | 8 +- substrate/frame/paged-list/src/lib.rs | 7 +- substrate/frame/paged-list/src/mock.rs | 17 +- substrate/frame/paged-list/src/paged_list.rs | 579 ------------- substrate/frame/paged-list/src/tests.rs | 19 +- substrate/frame/support/fuzzer/Cargo.toml | 30 + .../fuzzer/src/bin/paged_all_in_one.rs | 97 +++ .../support/fuzzer/src/bin/paged_list.rs | 73 ++ .../support/fuzzer/src/bin/paged_nmap.rs | 77 ++ substrate/frame/support/fuzzer/src/lib.rs | 130 +++ .../procedural/src/pallet/expand/storage.rs | 20 + .../procedural/src/pallet/parse/storage.rs | 39 + substrate/frame/support/src/lib.rs | 3 +- substrate/frame/support/src/storage/lists.rs | 141 ++++ substrate/frame/support/src/storage/mod.rs | 81 +- .../frame/support/src/storage/types/mod.rs | 4 + .../support/src/storage/types/paged_list.rs | 472 +++++++++++ .../support/src/storage/types/paged_nmap.rs | 792 ++++++++++++++++++ .../frame/support/test/tests/final_keys.rs | 263 +++++- 22 files changed, 2188 insertions(+), 684 deletions(-) delete mode 100644 substrate/frame/paged-list/src/paged_list.rs create mode 100644 substrate/frame/support/fuzzer/Cargo.toml create mode 100644 substrate/frame/support/fuzzer/src/bin/paged_all_in_one.rs create mode 100644 substrate/frame/support/fuzzer/src/bin/paged_list.rs create mode 100644 substrate/frame/support/fuzzer/src/bin/paged_nmap.rs create mode 100644 substrate/frame/support/fuzzer/src/lib.rs create mode 100644 substrate/frame/support/src/storage/lists.rs create mode 100644 substrate/frame/support/src/storage/types/paged_list.rs create mode 100644 substrate/frame/support/src/storage/types/paged_nmap.rs diff --git a/Cargo.lock 
b/Cargo.lock index 1767245c6abf..46fb0a3e0612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5917,6 +5917,17 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "frame-support-storage-fuzzer" +version = "0.1.0" +dependencies = [ + "arbitrary", + "frame-support", + "honggfuzz", + "sp-io", + "sp-metadata-ir", +] + [[package]] name = "frame-support-test" version = "3.0.0" @@ -6592,13 +6603,13 @@ dependencies = [ [[package]] name = "honggfuzz" -version = "0.5.55" +version = "0.5.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848e9c511092e0daa0a35a63e8e6e475a3e8f870741448b9f6028d69b142f18e" +checksum = "7c76b6234c13c9ea73946d1379d33186151148e0da231506b964b44f3d023505" dependencies = [ "arbitrary", "lazy_static", - "memmap2 0.5.10", + "memmap2 0.9.3", "rustc_version 0.4.0", ] diff --git a/Cargo.toml b/Cargo.toml index d6099e420f91..cc4e7de63c25 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -405,6 +405,7 @@ members = [ "substrate/frame/statement", "substrate/frame/sudo", "substrate/frame/support", + "substrate/frame/support/fuzzer", "substrate/frame/support/procedural", "substrate/frame/support/procedural/tools", "substrate/frame/support/procedural/tools/derive", diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index 6ff07ba1ddd2..a17968ca307d 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -18,7 +18,7 @@ path = "src/paged_list.rs" [dependencies] arbitrary = "1.3.2" -honggfuzz = "0.5.49" +honggfuzz = "0.5.56" frame-support = { path = "../../support", default-features = false, features = ["std"] } sp-io = { path = "../../../primitives/io", default-features = false, features = ["std"] } diff --git a/substrate/frame/paged-list/fuzzer/src/paged_list.rs b/substrate/frame/paged-list/fuzzer/src/paged_list.rs index 43b797eee6bf..0362612008fa 100644 --- a/substrate/frame/paged-list/fuzzer/src/paged_list.rs +++ 
b/substrate/frame/paged-list/fuzzer/src/paged_list.rs @@ -21,7 +21,8 @@ //! //! # Debugging a panic //! Once a panic is found, it can be debugged with -//! `cargo hfuzz run-debug pallet-paged-list hfuzz_workspace/pallet-paged-list/*.fuzz`. +//! `cargo hfuzz run-debug pallet-paged-list-fuzzer +//! hfuzz_workspace/pallet-paged-list-fuzzer/*.fuzz`. //! //! # More information //! More information about `honggfuzz` can be found @@ -47,7 +48,7 @@ fn main() { /// /// It also changes the maximal number of elements per page dynamically, hence the `page_size`. fn drain_append_work(ops: Vec, page_size: u8) { - if page_size == 0 { + if page_size < 4 { return } @@ -61,11 +62,12 @@ fn drain_append_work(ops: Vec, page_size: u8) { assert!(total >= 0); assert_eq!(List::iter().count(), total as usize); + assert_eq!(total as u64, List::len()); // We have the assumption that the queue removes the metadata when empty. if total == 0 { assert_eq!(List::drain().count(), 0); - assert_eq!(Meta::from_storage().unwrap_or_default(), Default::default()); + assert_eq!(Meta::from_storage(((),)).unwrap_or_default(), Default::default()); } } diff --git a/substrate/frame/paged-list/src/lib.rs b/substrate/frame/paged-list/src/lib.rs index ddeed174f34b..ffcc7bef2e9e 100644 --- a/substrate/frame/paged-list/src/lib.rs +++ b/substrate/frame/paged-list/src/lib.rs @@ -66,15 +66,14 @@ pub use pallet::*; pub mod mock; -mod paged_list; mod tests; use codec::FullCodec; use frame_support::{ pallet_prelude::StorageList, + storage::types::StoragePagedList, traits::{PalletInfoAccess, StorageInstance}, }; -pub use paged_list::StoragePagedList; #[frame_support::pallet] pub mod pallet { @@ -111,6 +110,10 @@ impl, I: 'static> StorageList for Pallet { type Iterator = as StorageList>::Iterator; type Appender = as StorageList>::Appender; + fn len() -> u64 { + List::::len() + } + fn iter() -> Self::Iterator { List::::iter() } diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs 
index e086b4ba2b27..d014a703db68 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -19,8 +19,12 @@ #![cfg(feature = "std")] -use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; -use frame_support::{derive_impl, traits::ConstU16}; +use crate::{Config, ListPrefix}; +use frame_support::{ + derive_impl, + storage::types::StoragePagedListMeta, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -53,14 +57,9 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = ConstU16<42>; - type OnSetCode = (); + type PalletInfo = PalletInfo; // FAIL-CI remove more type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/substrate/frame/paged-list/src/paged_list.rs b/substrate/frame/paged-list/src/paged_list.rs deleted file mode 100644 index eecc728cd62a..000000000000 --- a/substrate/frame/paged-list/src/paged_list.rs +++ /dev/null @@ -1,579 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Paged storage list. 
- -// links are better than no links - even when they refer to private stuff. -#![allow(rustdoc::private_intra_doc_links)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(missing_docs)] -#![deny(unsafe_code)] - -use codec::{Decode, Encode, EncodeLike, FullCodec}; -use core::marker::PhantomData; -use frame_support::{ - defensive, - storage::StoragePrefixedContainer, - traits::{Get, StorageInstance}, - CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, -}; -use sp_runtime::traits::Saturating; -use sp_std::prelude::*; - -pub type PageIndex = u32; -pub type ValueIndex = u32; - -/// A paginated storage list. -/// -/// # Motivation -/// -/// This type replaces `StorageValue>` in situations where only iteration and appending is -/// needed. There are a few places where this is the case. A paginated structure reduces the memory -/// usage when a storage transactions needs to be rolled back. The main motivation is therefore a -/// reduction of runtime memory on storage transaction rollback. Should be configured such that the -/// size of a page is about 64KiB. This can only be ensured when `V` implements `MaxEncodedLen`. -/// -/// # Implementation -/// -/// The metadata of this struct is stored in [`StoragePagedListMeta`]. The data is stored in -/// [`Page`]s. -/// -/// Each [`Page`] holds at most `ValuesPerNewPage` values in its `values` vector. The last page is -/// the only one that could have less than `ValuesPerNewPage` values. -/// **Iteration** happens by starting -/// at [`first_page`][StoragePagedListMeta::first_page]/ -/// [`first_value_offset`][StoragePagedListMeta::first_value_offset] and incrementing these indices -/// as long as there are elements in the page and there are pages in storage. All elements of a page -/// are loaded once a page is read from storage. Iteration then happens on the cached elements. This -/// reduces the number of storage `read` calls on the overlay. 
**Appending** to the list happens by -/// appending to the last page by utilizing [`sp_io::storage::append`]. It allows to directly extend -/// the elements of `values` vector of the page without loading the whole vector from storage. A new -/// page is instantiated once [`Page::next`] overflows `ValuesPerNewPage`. Its vector will also be -/// created through [`sp_io::storage::append`]. **Draining** advances the internal indices identical -/// to Iteration. It additionally persists the increments to storage and thereby 'drains' elements. -/// Completely drained pages are deleted from storage. -/// -/// # Further Observations -/// -/// - The encoded layout of a page is exactly its [`Page::values`]. The [`Page::next`] offset is -/// stored in the [`StoragePagedListMeta`] instead. There is no particular reason for this, -/// besides having all management state handy in one location. -/// - The PoV complexity of iterating compared to a `StorageValue>` is improved for -/// "shortish" iterations and worse for total iteration. The append complexity is identical in the -/// asymptotic case when using an `Appender`, and worse in all. For example when appending just -/// one value. -/// - It does incur a read overhead on the host side as compared to a `StorageValue>`. -pub struct StoragePagedList { - _phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>, -} - -/// The state of a [`StoragePagedList`]. -/// -/// This struct doubles as [`frame_support::storage::StorageList::Appender`]. -#[derive( - Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, DebugNoBound, DefaultNoBound, -)] -// todo ignore scale bounds -pub struct StoragePagedListMeta { - /// The first page that could contain a value. - /// - /// Can be >0 when pages were deleted. - pub first_page: PageIndex, - /// The first index inside `first_page` that could contain a value. - /// - /// Can be >0 when values were deleted. - pub first_value_offset: ValueIndex, - - /// The last page that could contain data. 
- /// - /// Appending starts at this page index. - pub last_page: PageIndex, - /// The last value inside `last_page` that could contain a value. - /// - /// Appending starts at this index. If the page does not hold a value at this index, then the - /// whole list is empty. The only case where this can happen is when both are `0`. - pub last_page_len: ValueIndex, - - _phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>, -} - -impl frame_support::storage::StorageAppender - for StoragePagedListMeta -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - fn append(&mut self, item: EncodeLikeValue) - where - EncodeLikeValue: EncodeLike, - { - self.append_one(item); - } -} - -impl StoragePagedListMeta -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - pub fn from_storage() -> Option { - let key = Self::key(); - - sp_io::storage::get(&key).and_then(|raw| Self::decode(&mut &raw[..]).ok()) - } - - pub fn key() -> Vec { - meta_key::() - } - - pub fn append_one(&mut self, item: EncodeLikeValue) - where - EncodeLikeValue: EncodeLike, - { - // Note: we use >= here in case someone decreased it in a runtime upgrade. - if self.last_page_len >= ValuesPerNewPage::get() { - self.last_page.saturating_inc(); - self.last_page_len = 0; - } - let key = page_key::(self.last_page); - self.last_page_len.saturating_inc(); - sp_io::storage::append(&key, item.encode()); - self.store(); - } - - pub fn store(&self) { - let key = Self::key(); - self.using_encoded(|enc| sp_io::storage::set(&key, enc)); - } - - pub fn reset(&mut self) { - *self = Default::default(); - Self::delete(); - } - - pub fn delete() { - sp_io::storage::clear(&Self::key()); - } -} - -/// A page that was decoded from storage and caches its values. -pub struct Page { - /// The index of the page. - index: PageIndex, - /// The remaining values of the page, to be drained by [`Page::next`]. 
- values: sp_std::iter::Skip>, -} - -impl Page { - /// Read the page with `index` from storage and assume the first value at `value_index`. - pub fn from_storage( - index: PageIndex, - value_index: ValueIndex, - ) -> Option { - let key = page_key::(index); - let values = sp_io::storage::get(&key) - .and_then(|raw| sp_std::vec::Vec::::decode(&mut &raw[..]).ok())?; - if values.is_empty() { - // Don't create empty pages. - return None - } - let values = values.into_iter().skip(value_index as usize); - - Some(Self { index, values }) - } - - /// Whether no more values can be read from this page. - pub fn is_eof(&self) -> bool { - self.values.len() == 0 - } - - /// Delete this page from storage. - pub fn delete(&self) { - delete_page::(self.index); - } -} - -/// Delete a page with `index` from storage. -// Does not live under `Page` since it does not require the `Value` generic. -pub(crate) fn delete_page(index: PageIndex) { - let key = page_key::(index); - sp_io::storage::clear(&key); -} - -/// Storage key of a page with `index`. -// Does not live under `Page` since it does not require the `Value` generic. -pub(crate) fn page_key(index: PageIndex) -> Vec { - (StoragePagedListPrefix::::final_prefix(), b"page", index).encode() -} - -pub(crate) fn meta_key() -> Vec { - (StoragePagedListPrefix::::final_prefix(), b"meta").encode() -} - -impl Iterator for Page { - type Item = V; - - fn next(&mut self) -> Option { - self.values.next() - } -} - -/// Iterates over values of a [`StoragePagedList`]. -/// -/// Can optionally drain the iterated values. -pub struct StoragePagedListIterator { - // Design: we put the Page into the iterator to have fewer storage look-ups. Yes, these - // look-ups would be cached anyway, but bugging the overlay on each `.next` call still seems - // like a poor trade-off than caching it in the iterator directly. Iterating and modifying is - // not allowed at the same time anyway, just like with maps. 
Note: if Page is empty then - // the iterator did not find any data upon setup or ran out of pages. - page: Option>, - drain: bool, - meta: StoragePagedListMeta, -} - -impl StoragePagedListIterator -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - /// Read self from the storage. - pub fn from_meta( - meta: StoragePagedListMeta, - drain: bool, - ) -> Self { - let page = Page::::from_storage::(meta.first_page, meta.first_value_offset); - Self { page, drain, meta } - } -} - -impl Iterator - for StoragePagedListIterator -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - type Item = Value; - - fn next(&mut self) -> Option { - let page = self.page.as_mut()?; - let value = match page.next() { - Some(value) => value, - None => { - defensive!("There are no empty pages in storage; nuking the list"); - self.meta.reset(); - self.page = None; - return None - }, - }; - - if page.is_eof() { - if self.drain { - page.delete::(); - self.meta.first_value_offset = 0; - self.meta.first_page.saturating_inc(); - } - - debug_assert!(!self.drain || self.meta.first_page == page.index + 1); - self.page = Page::from_storage::(page.index.saturating_add(1), 0); - if self.drain { - if self.page.is_none() { - self.meta.reset(); - } else { - self.meta.store(); - } - } - } else { - if self.drain { - self.meta.first_value_offset.saturating_inc(); - self.meta.store(); - } - } - Some(value) - } -} - -impl frame_support::storage::StorageList - for StoragePagedList -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - type Iterator = StoragePagedListIterator; - type Appender = StoragePagedListMeta; - - fn iter() -> Self::Iterator { - StoragePagedListIterator::from_meta(Self::read_meta(), false) - } - - fn drain() -> Self::Iterator { - StoragePagedListIterator::from_meta(Self::read_meta(), true) - } - - fn appender() -> Self::Appender { - Self::appender() - } -} - -impl StoragePagedList -where - Prefix: 
StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - fn read_meta() -> StoragePagedListMeta { - // Use default here to not require a setup migration. - StoragePagedListMeta::from_storage().unwrap_or_default() - } - - /// Provides a fast append iterator. - /// - /// The list should not be modified while appending. Also don't call it recursively. - fn appender() -> StoragePagedListMeta { - Self::read_meta() - } - - /// Return the elements of the list. - #[cfg(test)] - fn as_vec() -> Vec { - >::iter().collect() - } - - /// Return and remove the elements of the list. - #[cfg(test)] - fn as_drained_vec() -> Vec { - >::drain().collect() - } -} - -/// Provides the final prefix for a [`StoragePagedList`]. -/// -/// It solely exists so that when re-using it from the iterator and meta struct, none of the un-used -/// generics bleed through. Otherwise when only having the `StoragePrefixedContainer` implementation -/// on the list directly, the iterator and metadata need to muster *all* generics, even the ones -/// that are completely useless for prefix calculation. -struct StoragePagedListPrefix(PhantomData); - -impl StoragePrefixedContainer for StoragePagedListPrefix -where - Prefix: StorageInstance, -{ - fn pallet_prefix() -> &'static [u8] { - Prefix::pallet_prefix().as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - Prefix::STORAGE_PREFIX.as_bytes() - } -} - -impl StoragePrefixedContainer - for StoragePagedList -where - Prefix: StorageInstance, - Value: FullCodec, - ValuesPerNewPage: Get, -{ - fn pallet_prefix() -> &'static [u8] { - StoragePagedListPrefix::::pallet_prefix() - } - - fn storage_prefix() -> &'static [u8] { - StoragePagedListPrefix::::storage_prefix() - } -} - -/// Prelude for (doc)tests. 
-#[cfg(feature = "std")] -#[allow(dead_code)] -pub(crate) mod mock { - pub use super::*; - pub use frame_support::parameter_types; - #[cfg(test)] - pub use frame_support::{storage::StorageList as _, StorageNoopGuard}; - #[cfg(test)] - pub use sp_io::TestExternalities; - - parameter_types! { - pub const ValuesPerNewPage: u32 = 5; - pub const MaxPages: Option = Some(20); - } - - pub struct Prefix; - impl StorageInstance for Prefix { - fn pallet_prefix() -> &'static str { - "test" - } - const STORAGE_PREFIX: &'static str = "foo"; - } - - pub type List = StoragePagedList; -} - -#[cfg(test)] -mod tests { - use super::mock::*; - - #[test] - fn append_works() { - TestExternalities::default().execute_with(|| { - List::append_many(0..1000); - assert_eq!(List::as_vec(), (0..1000).collect::>()); - }); - } - - /// Draining all works. - #[test] - fn simple_drain_works() { - TestExternalities::default().execute_with(|| { - let _g = StorageNoopGuard::default(); // All in all a No-Op - List::append_many(0..1000); - - assert_eq!(List::as_drained_vec(), (0..1000).collect::>()); - - assert_eq!(List::read_meta(), Default::default()); - - // all gone - assert_eq!(List::as_vec(), Vec::::new()); - // Need to delete the metadata manually. - StoragePagedListMeta::::delete(); - }); - } - - /// Drain half of the elements and iterator the rest. - #[test] - fn partial_drain_works() { - TestExternalities::default().execute_with(|| { - List::append_many(0..100); - - let vals = List::drain().take(50).collect::>(); - assert_eq!(vals, (0..50).collect::>()); - - let meta = List::read_meta(); - // Will switch over to `10/0`, but will in the next call. - assert_eq!((meta.first_page, meta.first_value_offset), (10, 0)); - - // 50 gone, 50 to go - assert_eq!(List::as_vec(), (50..100).collect::>()); - }); - } - - /// Draining, appending and iterating work together. 
- #[test] - fn drain_append_iter_works() { - TestExternalities::default().execute_with(|| { - for r in 1..=100 { - List::append_many(0..12); - List::append_many(0..12); - - let dropped = List::drain().take(12).collect::>(); - assert_eq!(dropped, (0..12).collect::>()); - - assert_eq!(List::as_vec(), (0..12).cycle().take(r * 12).collect::>()); - } - }); - } - - /// Pages are removed ASAP. - #[test] - fn drain_eager_page_removal() { - TestExternalities::default().execute_with(|| { - List::append_many(0..9); - - assert!(sp_io::storage::exists(&page_key::(0))); - assert!(sp_io::storage::exists(&page_key::(1))); - - assert_eq!(List::drain().take(5).count(), 5); - // Page 0 is eagerly removed. - assert!(!sp_io::storage::exists(&page_key::(0))); - assert!(sp_io::storage::exists(&page_key::(1))); - }); - } - - /// Appending encodes pages as `Vec`. - #[test] - fn append_storage_layout() { - TestExternalities::default().execute_with(|| { - List::append_many(0..9); - - let key = page_key::(0); - let raw = sp_io::storage::get(&key).expect("Page should be present"); - let as_vec = Vec::::decode(&mut &raw[..]).unwrap(); - assert_eq!(as_vec.len(), 5, "First page contains 5"); - - let key = page_key::(1); - let raw = sp_io::storage::get(&key).expect("Page should be present"); - let as_vec = Vec::::decode(&mut &raw[..]).unwrap(); - assert_eq!(as_vec.len(), 4, "Second page contains 4"); - - let meta = sp_io::storage::get(&meta_key::()).expect("Meta should be present"); - let meta: StoragePagedListMeta = - Decode::decode(&mut &meta[..]).unwrap(); - assert_eq!(meta.first_page, 0); - assert_eq!(meta.first_value_offset, 0); - assert_eq!(meta.last_page, 1); - assert_eq!(meta.last_page_len, 4); - - let page = Page::::from_storage::(0, 0).unwrap(); - assert_eq!(page.index, 0); - assert_eq!(page.values.count(), 5); - - let page = Page::::from_storage::(1, 0).unwrap(); - assert_eq!(page.index, 1); - assert_eq!(page.values.count(), 4); - }); - } - - #[test] - fn page_key_correct() { - let got 
= page_key::(0); - let pallet_prefix = StoragePagedListPrefix::::final_prefix(); - let want = (pallet_prefix, b"page", 0).encode(); - - assert_eq!(want.len(), 32 + 4 + 4); - assert!(want.starts_with(&pallet_prefix[..])); - assert_eq!(got, want); - } - - #[test] - fn meta_key_correct() { - let got = meta_key::(); - let pallet_prefix = StoragePagedListPrefix::::final_prefix(); - let want = (pallet_prefix, b"meta").encode(); - - assert_eq!(want.len(), 32 + 4); - assert!(want.starts_with(&pallet_prefix[..])); - assert_eq!(got, want); - } - - #[test] - fn peekable_drain_also_deletes() { - TestExternalities::default().execute_with(|| { - List::append_many(0..10); - - let mut iter = List::drain().peekable(); - assert_eq!(iter.peek(), Some(&0)); - // `peek` does remove one element... - assert_eq!(List::iter().count(), 9); - }); - } -} diff --git a/substrate/frame/paged-list/src/tests.rs b/substrate/frame/paged-list/src/tests.rs index becb4b23508e..f1a2b3c7d923 100644 --- a/substrate/frame/paged-list/src/tests.rs +++ b/substrate/frame/paged-list/src/tests.rs @@ -46,7 +46,7 @@ fn append_many_works() { #[docify::export] #[test] fn appender_works() { - use frame_support::storage::StorageAppender; + use frame_support::storage::StorageListAppender; test_closure(|| { let mut appender = PagedList::appender(); @@ -86,23 +86,24 @@ fn drain_works() { #[test] fn iter_independent_works() { test_closure(|| { - PagedList::append_many(0..1000); - PagedList2::append_many(0..1000); + PagedList::append_many(0..100); + PagedList2::append_many(0..200); - assert_eq!(PagedList::iter().collect::>(), (0..1000).collect::>()); - assert_eq!(PagedList::iter().collect::>(), (0..1000).collect::>()); + assert_eq!(PagedList::iter().collect::>(), (0..100).collect::>()); + assert_eq!(PagedList2::iter().collect::>(), (0..200).collect::>()); // drain - assert_eq!(PagedList::drain().collect::>(), (0..1000).collect::>()); - assert_eq!(PagedList2::iter().collect::>(), (0..1000).collect::>()); + 
assert_eq!(PagedList::drain().collect::>(), (0..100).collect::>()); + assert_eq!(PagedList2::drain().collect::>(), (0..200).collect::>()); assert_eq!(PagedList::iter().count(), 0); + assert_eq!(PagedList2::iter().count(), 0); }); } #[test] fn prefix_distinct() { - let p1 = List::::final_prefix(); - let p2 = List::::final_prefix(); + let p1 = MetaOf::::storage_key(((),)); + let p2 = MetaOf::::storage_key(((),)); assert_ne!(p1, p2); } diff --git a/substrate/frame/support/fuzzer/Cargo.toml b/substrate/frame/support/fuzzer/Cargo.toml new file mode 100644 index 000000000000..9b1ebfd2781d --- /dev/null +++ b/substrate/frame/support/fuzzer/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "frame-support-storage-fuzzer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Fuzz storage types of frame-support" +publish = false + +[[bin]] +name = "paged-storage-list-fuzzer" +path = "src/bin/paged_list.rs" + +[[bin]] +name = "paged-storage-nmap-fuzzer" +path = "src/bin/paged_nmap.rs" + +[[bin]] +name = "paged-storage-all-in-one-fuzzer" +path = "src/bin/paged_all_in_one.rs" + +[dependencies] +arbitrary = "1.3.0" +honggfuzz = "0.5.49" + +frame-support = { features = [ "std" ], path = "../../support" } +sp-io = { path = "../../../primitives/io", default-features = false, features = [ "std" ] } +sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false, features = [ "std" ] } diff --git a/substrate/frame/support/fuzzer/src/bin/paged_all_in_one.rs b/substrate/frame/support/fuzzer/src/bin/paged_all_in_one.rs new file mode 100644 index 000000000000..0b4980bcc55f --- /dev/null +++ b/substrate/frame/support/fuzzer/src/bin/paged_all_in_one.rs @@ -0,0 +1,97 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Fuzzer to test all paged data structures in one go to ensure that they won't interfere with +//! each other. +//! +//! # Running +//! Running this fuzzer can be done with `cd substrate/frame/support/fuzzer && cargo hfuzz run +//! paged-storage-all-in-one-fuzzer`. `honggfuzz` CLI options can be used by setting +//! `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # More information +//! More information about `honggfuzz` can be found +//! [here](https://docs.rs/honggfuzz/). + +use frame_support::{storage::StorageKeyedList, StorageNoopGuard}; +use honggfuzz::fuzz; +use std::collections::BTreeMap; + +use frame_support_storage_fuzzer::*; + +fn main() { + loop { + fuzz!(|data: (Vec, u16)| { + drain_append_work(data.0, data.1); + }); + } +} + +/// Appends and drains random number of elements in random order and checks storage invariants. +/// +/// It also changes the maximal number of elements per page dynamically, hence the `page_size`. 
+fn drain_append_work(ops: Vec, page_size: u16) { + if page_size < 4 { + return + } + + TestExternalities::default().execute_with(|| { + // Changing the heapsize should be fine at any point in time - even to non-multiple of 4, + HeapSize::set(&page_size.into()); + + let _g = StorageNoopGuard::default(); + let mut map_totals: BTreeMap = BTreeMap::new(); + let mut list_total = 0i64; + + for op in ops.into_iter() { + match op { + AllInOneOp::NMap(op) => { + let total = map_totals.entry(op.key).or_insert(0); + *total += op.op.exec_map(op.key); + + assert!(*total >= 0); + assert_eq!(NMap::iter((op.key,)).count(), *total as usize); + assert_eq!(*total as u64, NMap::len((op.key,))); + + // We have the assumption that the queue removes the metadata when empty. + if *total == 0 { + assert_eq!(NMap::drain((op.key,)).count(), 0); + assert_eq!(NMap::meta((op.key,)), Default::default()); + } + }, + AllInOneOp::List(op) => { + list_total += op.exec_list::(); + + assert!(list_total >= 0); + assert_eq!(List::iter().count(), list_total as usize); + assert_eq!(list_total as u64, List::len()); + + if list_total == 0 { + assert_eq!(List::drain().count(), 0); + assert_eq!(List::meta(), Default::default()); + } + }, + } + } + + for (key, total) in map_totals { + assert_eq!(NMap::drain((key,)).count(), total as usize); + } + assert_eq!(List::drain().count(), list_total as usize); + // `StorageNoopGuard` checks that there is no storage leaked. + }); +} diff --git a/substrate/frame/support/fuzzer/src/bin/paged_list.rs b/substrate/frame/support/fuzzer/src/bin/paged_list.rs new file mode 100644 index 000000000000..466a4961c748 --- /dev/null +++ b/substrate/frame/support/fuzzer/src/bin/paged_list.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cd substrate/frame/support/fuzzer && cargo hfuzz run +//! paged-storage-list-fuzzer`. `honggfuzz` CLI options can be used by setting `HFUZZ_RUN_ARGS`, +//! such as `-n 4` to use 4 threads. +//! +//! # More information +//! More information about `honggfuzz` can be found +//! [here](https://docs.rs/honggfuzz/). + +use frame_support::StorageNoopGuard; +use honggfuzz::fuzz; + +use frame_support_storage_fuzzer::*; + +fn main() { + loop { + fuzz!(|data: (Vec, u8)| { + drain_append_work(data.0, data.1); + }); + } +} + +/// Appends and drains random number of elements in random order and checks storage invariants. +/// +/// It also changes the maximal number of elements per page dynamically, hence the `page_size`. +fn drain_append_work(ops: Vec, page_size: u8) { + if page_size < 4 { + return + } + + TestExternalities::default().execute_with(|| { + // We change the `HeapSize` to demonstrate that it can cope with arbitrary changes at any + // time: + HeapSize::set(&page_size.into()); + + let _g = StorageNoopGuard::default(); + let mut total: i64 = 0; + + for op in ops.into_iter() { + total += op.exec_list::(); + + assert!(total >= 0); + assert_eq!(List::iter().count(), total as usize); + assert_eq!(List::len(), total as u64); + + // We have the assumption that the queue removes the metadata when empty. 
+ if total == 0 { + assert_eq!(List::drain().count(), 0); + assert_eq!(List::meta(), Default::default()); + } + } + + assert_eq!(List::drain().count(), total as usize); + // `StorageNoopGuard` checks that there is no storage leaked. + }); +} diff --git a/substrate/frame/support/fuzzer/src/bin/paged_nmap.rs b/substrate/frame/support/fuzzer/src/bin/paged_nmap.rs new file mode 100644 index 000000000000..f5863cb5ec62 --- /dev/null +++ b/substrate/frame/support/fuzzer/src/bin/paged_nmap.rs @@ -0,0 +1,77 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cd substrate/frame/support/fuzzer && cargo hfuzz run +//! paged-storage-nmap-fuzzer`. `honggfuzz` CLI options can be used by setting `HFUZZ_RUN_ARGS`, +//! such as `-n 4` to use 4 threads. +//! +//! # More information +//! More information about `honggfuzz` can be found +//! [here](https://docs.rs/honggfuzz/). + +use frame_support::{storage::StorageKeyedList, StorageNoopGuard}; +use honggfuzz::fuzz; +use std::collections::BTreeMap; + +use frame_support_storage_fuzzer::*; + +fn main() { + loop { + fuzz!(|data: (Vec, u8)| { + drain_append_work(data.0, data.1); + }); + } +} + +/// Appends and drains random number of elements in random order and checks storage invariants. 
+/// +/// It also changes the maximal number of elements per page dynamically, hence the `page_size`. +fn drain_append_work(ops: Vec, page_size: u8) { + if page_size < 4 { + return + } + + TestExternalities::default().execute_with(|| { + // We change the `HeapSize` to demonstrate that it can cope with arbitrary changes at any + // time: + HeapSize::set(&page_size.into()); + + let _g = StorageNoopGuard::default(); + let mut totals: BTreeMap = BTreeMap::new(); + + for op in ops.into_iter() { + let total = totals.entry(op.key).or_insert(0); + *total += op.op.exec_map(op.key); + + assert!(*total >= 0); + assert_eq!(NMap::iter((op.key,)).count(), *total as usize); + assert_eq!(NMap::len((op.key,)), *total as u64); + + // The queue removes the metadata when empty. + if *total == 0 { + assert_eq!(NMap::drain((op.key,)).count(), 0); + assert_eq!(NMap::meta((op.key,)), Default::default()); + } + } + + for (key, total) in totals { + assert_eq!(NMap::drain((key,)).count(), total as usize); + } + // `StorageNoopGuard` checks that there is no storage leaked. + }); +} diff --git a/substrate/frame/support/fuzzer/src/lib.rs b/substrate/frame/support/fuzzer/src/lib.rs new file mode 100644 index 000000000000..80ae2a0be0fd --- /dev/null +++ b/substrate/frame/support/fuzzer/src/lib.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//! Shared code of the fuzzers. +//! +//! This provides the arbitrary types that will be randomly generated by the fuzzing framework. + +pub use frame_support::{ + pallet_prelude::NMapKey, + parameter_types, + storage::{types::ValueQuery, StorageList as _}, + Blake2_128Concat, +}; +use frame_support::{ + pallet_prelude::{StorageList, StoragePagedNMap}, + storage::{types::StoragePagedList, StorageKeyedList}, + traits::StorageInstance, +}; +pub use sp_io::{hashing::twox_128, TestExternalities}; + +/// What storage operation to do. +pub enum Op { + /// Append some values. + Append(Vec), + /// Try to drain as many items as possible, limited by the inner value. + Drain(u8), +} + +impl arbitrary::Arbitrary<'_> for Op { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + if u.arbitrary::()? { + Ok(Op::Append(Vec::::arbitrary(u)?)) + } else { + Ok(Op::Drain(u.arbitrary::()?)) + } + } +} + +impl Op { + /// Execute the operation on a List. + pub fn exec_list>(self) -> i64 { + match self { + Op::Append(v) => { + let l = v.len(); + List::append_many(v); + l as i64 + }, + Op::Drain(v) => -(List::drain().take(v as usize).count() as i64), + } + } + + /// Execute the operation on a Map. NOTE: The map is currently hard-coded. + pub fn exec_map(self, key: u32) -> i64 { + match self { + Op::Append(v) => { + let l = v.len(); + NMap::append_many((key,), v); + l as i64 + }, + Op::Drain(v) => -(NMap::drain((key,)).take(v as usize).count() as i64), + } + } +} + +/// An `Op` that also needs a key to function. For example when using it with a map. +pub struct KeyedOp { + /// The operation to do. + pub op: Op, + /// On what key the operation should be done. + pub key: u32, +} + +impl arbitrary::Arbitrary<'_> for KeyedOp { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(KeyedOp { op: Op::arbitrary(u)?, key: u.arbitrary::()? }) + } +} + +/// Aggregated version that can be used to fuzz a map and a list at the same time. 
+pub enum AllInOneOp { + /// Do a list operation. + List(Op), + /// Do an operation on a key (map). + NMap(KeyedOp), +} + +impl arbitrary::Arbitrary<'_> for AllInOneOp { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { + if u.arbitrary::<bool>()? { + Ok(AllInOneOp::List(Op::arbitrary(u)?)) + } else { + Ok(AllInOneOp::NMap(KeyedOp::arbitrary(u)?)) + } + } +} + +parameter_types! { + /// Changeable heap size that will be used for the list and map. + pub storage HeapSize: u32 = 20; +} + +pub struct Prefix; +impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "test" + } + + const STORAGE_PREFIX: &'static str = "foo"; +} + +// NOTE: We use the *same* prefix for the list and the map. This should **never** be done by a +// pallet. We just do it here to show that the storage key derivations of a List and an NMap do not +// collide. + +pub type List = StoragePagedList<Prefix, u32, HeapSize>; +pub type NMap = StoragePagedNMap<Prefix, (NMapKey<Blake2_128Concat, u32>,), u32, HeapSize>; diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 3cc8a843e3b1..0ccb64a16ce5 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -196,6 +196,14 @@ pub fn process_generics(def: &mut Def) -> syn::Result { + args.args.push(syn::GenericArgument::Type(value.clone())); + let heap_size = heap_size.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(heap_size)); + let max_pages = max_pages.unwrap_or_else(|| default_max_values.clone()); + args.args.push(syn::GenericArgument::Type(max_pages)); + }, StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } | StorageGenerics::CountedMap { hasher, @@ -263,6 +271,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result (1, 2, 3), + Metadata::PagedList { .. } => (1, 2, 3), // FAIL-CI Metadata::NMap { ..
} | Metadata::CountedNMap { .. } => (2, 3, 4), Metadata::Map { .. } | Metadata::CountedMap { .. } => (3, 4, 5), Metadata::DoubleMap { .. } => (5, 6, 7), @@ -337,6 +346,14 @@ fn augment_final_docs(def: &mut Def) { ); push_string_literal(&doc_line, storage); }, + // FAIL-CI + Metadata::PagedList { value } => { + let doc_line = format!( + "Storage type is [`StoragePagedList`] with value type `{}`.", + value.to_token_stream() + ); + push_string_literal(&doc_line, storage); + }, Metadata::DoubleMap { key1, key2, value } => { let doc_line = format!( "Storage type is [`StorageDoubleMap`] with key1 type {}, key2 type {} and value type {}.", @@ -506,6 +523,9 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { } ) }, + Metadata::PagedList { .. } => { + unreachable!("Getters are forbidden for storage type 'PagedList'.") // FAIL-CI + }, Metadata::CountedMap { key, value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index 9d96a18b5694..476e1bc626ab 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -152,6 +152,7 @@ impl PalletStorageAttrInfo { pub enum Metadata { Value { value: syn::Type }, Map { value: syn::Type, key: syn::Type }, + PagedList { value: syn::Type }, CountedMap { value: syn::Type, key: syn::Type }, DoubleMap { value: syn::Type, key1: syn::Type, key2: syn::Type }, NMap { keys: Vec, keygen: syn::Type, value: syn::Type }, @@ -230,6 +231,12 @@ pub enum StorageGenerics { on_empty: Option, max_values: Option, }, + // FAIL-CI + PagedList { + value: syn::Type, + heap_size: Option, + max_pages: Option, + }, CountedMap { hasher: syn::Type, key: syn::Type, @@ -265,6 +272,7 @@ impl StorageGenerics { let res = match 
self.clone() { Self::DoubleMap { value, key1, key2, .. } => Metadata::DoubleMap { value, key1, key2 }, Self::Map { value, key, .. } => Metadata::Map { value, key }, + Self::PagedList { value, .. } => Metadata::PagedList { value }, Self::CountedMap { value, key, .. } => Metadata::CountedMap { value, key }, Self::Value { value, .. } => Metadata::Value { value }, Self::NMap { keygen, value, .. } => @@ -285,6 +293,8 @@ impl StorageGenerics { Self::Value { query_kind, .. } | Self::NMap { query_kind, .. } | Self::CountedNMap { query_kind, .. } => query_kind.clone(), + // A list cannot be queried - only iterated. + Self::PagedList { .. } => None, } } } @@ -292,6 +302,7 @@ impl StorageGenerics { enum StorageKind { Value, Map, + PagedList, CountedMap, DoubleMap, NMap, @@ -432,6 +443,25 @@ fn process_named_generics( max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } }, + StorageKind::PagedList => { + check_generics( + &parsed, + &map_mandatory_generics, + &map_optional_generics, + "StoragePagedList", + args_span, + )?; + + StorageGenerics::PagedList { + // FAIL-CI + value: parsed + .remove("Value") + .map(|binding| binding.ty) + .expect("checked above as mandatory generic"), + heap_size: parsed.remove("HeapSize").map(|binding| binding.ty), + max_pages: parsed.remove("MaxPages").map(|binding| binding.ty), + } + }, StorageKind::CountedMap => { check_generics( &parsed, @@ -606,6 +636,13 @@ fn process_unnamed_generics( retrieve_arg(4).ok(), use_default_hasher(1)?, ), + StorageKind::PagedList => ( + // FAIL-CI double check + None, + Metadata::PagedList { value: retrieve_arg(1)? }, + None, + true, + ), StorageKind::CountedMap => ( None, Metadata::CountedMap { key: retrieve_arg(2)?, value: retrieve_arg(3)?
}, @@ -655,6 +692,8 @@ fn process_generics( let storage_kind = match &*segment.ident.to_string() { "StorageValue" => StorageKind::Value, "StorageMap" => StorageKind::Map, + "StoragePagedList" => StorageKind::PagedList, + "StoragePagedNMap" => StorageKind::PagedList, "CountedStorageMap" => StorageKind::CountedMap, "StorageDoubleMap" => StorageKind::DoubleMap, "StorageNMap" => StorageKind::NMap, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 7eddea1259d7..06c314d58536 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -889,7 +889,8 @@ pub mod pallet_prelude { bounded_vec::BoundedVec, types::{ CountedStorageMap, CountedStorageNMap, Key as NMapKey, OptionQuery, ResultQuery, - StorageDoubleMap, StorageMap, StorageNMap, StorageValue, ValueQuery, + StorageDoubleMap, StorageMap, StorageNMap, StoragePagedList, StoragePagedNMap, + StorageValue, ValueQuery, }, weak_bounded_vec::WeakBoundedVec, StorageList, diff --git a/substrate/frame/support/src/storage/lists.rs b/substrate/frame/support/src/storage/lists.rs new file mode 100644 index 000000000000..632b66ad0a34 --- /dev/null +++ b/substrate/frame/support/src/storage/lists.rs @@ -0,0 +1,141 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and types for non-continous storage types (lists). 
+ +use codec::{EncodeLike, FullCodec}; + +/// A non-continuous container type that can only be iterated. +pub trait StorageList { + /// Iterator for normal and draining iteration. + type Iterator: Iterator; + + /// Append iterator for fast append operations. + type Appender: StorageListAppender; + + /// Number of elements in the list. + fn len() -> u64; + + /// List the elements in append order. + fn iter() -> Self::Iterator; + + /// Drain the elements in append order. + /// + /// Note that this drains a value as soon as it is being inspected. For example `take_while(|_| + /// false)` still drains the first element. This also applies to `peek()`. + fn drain() -> Self::Iterator; + + /// A fast append iterator. + fn appender() -> Self::Appender; + + /// Append a single element. + /// + /// Should not be called repeatedly; use `append_many` instead. + /// Worst case linear `O(len)` with `len` being the number of elements in the list. + fn append_one(item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + Self::append_many(core::iter::once(item)); + } + + /// Append many elements. + /// + /// Should not be called repeatedly; use `appender` instead. + /// Worst case linear `O(len + items.count())` with `len` being the number of elements in the + /// list. + fn append_many(items: I) + where + EncodeLikeValue: EncodeLike, + I: IntoIterator, + { + let mut ap = Self::appender(); + ap.append_many(items); + } +} + +/// A non-continuous container type with a key that can only be iterated. +pub trait StorageKeyedList { + /// Iterator for normal and draining iteration. + type Iterator: Iterator; + + /// Append iterator for fast append operations. + type Appender: StorageListAppender; + + /// Number of elements in the list. + fn len(key: K) -> u64; + + /// List the elements in append order. + fn iter(key: K) -> Self::Iterator; + + /// Drain the elements in append order. + /// + /// Note that this drains a value as soon as it is being inspected.
 For example `take_while(|_| + /// false)` still drains the first element. This also applies to `peek()`. + fn drain(key: K) -> Self::Iterator; + + /// A fast append iterator. + fn appender(key: K) -> Self::Appender; + + /// Append a single element. + /// + /// Should not be called repeatedly; use `append_many` instead. + /// Worst case linear `O(len)` with `len` being the number of elements in the list. + fn append_one(key: K, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + Self::append_many(key, core::iter::once(item)); + } + + /// Append many elements. + /// + /// Should not be called repeatedly; use `appender` instead. + /// Worst case linear `O(len + items.count())` with `len` being the number of elements in the + /// list. + fn append_many(key: K, items: I) + where + EncodeLikeValue: EncodeLike, + I: IntoIterator, + { + let mut ap = Self::appender(key); + ap.append_many(items); + } +} + +/// Append iterator to append values to a storage struct. +/// +/// Can be used in situations where appending does not have constant time complexity. +pub trait StorageListAppender { + /// Append a single item in constant time `O(1)`. + fn append(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike; + + /// Append many items in linear time `O(items.count())`. + // Note: a default impl is provided since `Self` is already assumed to be optimal for single + // append operations.
+ fn append_many(&mut self, items: I) + where + EncodeLikeValue: EncodeLike, + I: IntoIterator, + { + for item in items.into_iter() { + self.append(item); + } + } +} diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index f7d7447482d0..f30a3a506106 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -46,6 +46,7 @@ pub mod child; #[doc(hidden)] pub mod generator; pub mod hashed; +pub mod lists; pub mod migration; pub mod storage_noop_guard; mod stream_iter; @@ -54,6 +55,8 @@ pub mod types; pub mod unhashed; pub mod weak_bounded_vec; +pub use lists::*; + /// Utility type for converting a storage map into a `Get` impl which returns the maximum /// key size. pub struct KeyLenOf(PhantomData); @@ -159,7 +162,7 @@ pub trait StorageValue { /// /// # Warning /// - /// `None` does not mean that `get()` does not return a value. The default value is completely + /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. fn decode_len() -> Option where @@ -195,75 +198,6 @@ pub trait StorageValue { } } -/// A non-continuous container type. -pub trait StorageList { - /// Iterator for normal and draining iteration. - type Iterator: Iterator; - - /// Append iterator for fast append operations. - type Appender: StorageAppender; - - /// List the elements in append order. - fn iter() -> Self::Iterator; - - /// Drain the elements in append order. - /// - /// Note that this drains a value as soon as it is being inspected. For example `take_while(|_| - /// false)` still drains the first element. This also applies to `peek()`. - fn drain() -> Self::Iterator; - - /// A fast append iterator. - fn appender() -> Self::Appender; - - /// Append a single element. - /// - /// Should not be called repeatedly; use `append_many` instead. - /// Worst case linear `O(len)` with `len` being the number if elements in the list. 
- fn append_one(item: EncodeLikeValue) - where - EncodeLikeValue: EncodeLike, - { - Self::append_many(core::iter::once(item)); - } - - /// Append many elements. - /// - /// Should not be called repeatedly; use `appender` instead. - /// Worst case linear `O(len + items.count())` with `len` beings the number if elements in the - /// list. - fn append_many(items: I) - where - EncodeLikeValue: EncodeLike, - I: IntoIterator, - { - let mut ap = Self::appender(); - ap.append_many(items); - } -} - -/// Append iterator to append values to a storage struct. -/// -/// Can be used in situations where appending does not have constant time complexity. -pub trait StorageAppender { - /// Append a single item in constant time `O(1)`. - fn append(&mut self, item: EncodeLikeValue) - where - EncodeLikeValue: EncodeLike; - - /// Append many items in linear time `O(items.count())`. - // Note: a default impl is provided since `Self` is already assumed to be optimal for single - // append operations. - fn append_many(&mut self, items: I) - where - EncodeLikeValue: EncodeLike, - I: IntoIterator, - { - for item in items.into_iter() { - self.append(item); - } - } -} - /// A strongly-typed map in storage. /// /// Details on implementation can be found at [`generator::StorageMap`]. @@ -1342,7 +1276,7 @@ impl Iterator for ChildTriePrefixIterator { /// Trait for storage types that store all its value after a unique prefix. pub trait StoragePrefixedContainer { - /// Pallet prefix. Used for generating final key. + /// Module prefix. Used for generating final key. fn pallet_prefix() -> &'static [u8]; /// Storage prefix. Used for generating final key. @@ -1469,7 +1403,8 @@ pub trait StoragePrefixedMap { /// This trait is sealed. pub trait StorageAppend: private::Sealed {} -/// It is expected that the length is at the beginning of the encoded object +/// Marker trait that will be implemented for types that support to decode their length in an +/// efficient way. 
It is expected that the length is at the beginning of the encoded object /// and that the length is a `Compact`. /// /// This trait is sealed. @@ -1791,7 +1726,7 @@ mod test { }); } - // This test ensures that the Digest encoding does not change without being noticed. + // This test ensures that the Digest encoding does not change without being noticied. #[test] fn digest_storage_append_works_as_expected() { TestExternalities::default().execute_with(|| { diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index 631410f425d1..edd43e8ab769 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -28,6 +28,8 @@ mod double_map; mod key; mod map; mod nmap; +mod paged_list; +mod paged_nmap; mod value; pub use counted_map::{CountedStorageMap, CountedStorageMapInstance, Counter}; @@ -39,6 +41,8 @@ pub use key::{ }; pub use map::StorageMap; pub use nmap::StorageNMap; +pub use paged_list::{StoragePagedList, StoragePagedListMeta}; +pub use paged_nmap::{StoragePagedNMap, StoragePagedNMapMeta}; pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. diff --git a/substrate/frame/support/src/storage/types/paged_list.rs b/substrate/frame/support/src/storage/types/paged_list.rs new file mode 100644 index 000000000000..f3813e680b19 --- /dev/null +++ b/substrate/frame/support/src/storage/types/paged_list.rs @@ -0,0 +1,472 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Paged storage list. + +// links are better than no links - even when they refer to private stuff. +#![allow(rustdoc::private_intra_doc_links)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(missing_docs)] +#![deny(unsafe_code)] + +use super::{ + key::KeyGenerator, + paged_nmap::{StoragePagedNMap, StoragePagedNMapAppender, StoragePagedNMapIterator}, +}; +use crate::{ + hash::StorageHasher, + storage::{ + lists::*, + types::paged_nmap::{StoragePagedNMapMeta, StoragePagedNMapPrefix}, + EncodeLikeTuple, KeyLenOf, StorageEntryMetadataBuilder, StoragePrefixedContainer, + TupleToEncodedIter, + }, + traits::{StorageInfo, StorageInstance}, + DefaultNoBound, +}; +use codec::{Encode, EncodeLike, FullCodec}; +use core::marker::PhantomData; +use frame_support::traits::Get; +use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; +use sp_std::prelude::*; + +/// A list in storage that stores items in a paged manner. +#[derive(DefaultNoBound)] +pub struct StoragePagedList { + /// Phantom data. + pub _phantom: PhantomData<(Prefix, Value, HeapSize, MaxPages)>, +} + +/// The metadata of a [`StoragePagedList`]. +pub type StoragePagedListMeta = + StoragePagedNMapMeta; + +/// Iterator type to inspect elements of a [`StoragePagedList`]. +pub struct StoragePagedListIterator { + inner: StoragePagedNMapIterator, +} + +/// Iterator type to append elements to a [`StoragePagedList`]. +pub struct StoragePagedListAppender { + inner: StoragePagedNMapAppender, +} + +/// An implementation of [`KeyGenerator`] that uses `()` as key type and does nothing. 
+pub struct EmptyKeyGen; + +/// The key type of [`EmptyKeyGen`]. +pub type EmptyKey = ((),); + +impl KeyGenerator for EmptyKeyGen { + type Key = (); + type KArg = EmptyKey; + type HashFn = Box Vec>; + type HArg = (); + + const HASHER_METADATA: &'static [sp_metadata_ir::StorageHasherIR] = &[]; + + fn final_key + TupleToEncodedIter>(_key: KArg) -> Vec { + Vec::new() + } + + fn migrate_key + TupleToEncodedIter>( + _key: &KArg, + _hash_fns: Self::HArg, + ) -> Vec { + Vec::new() + } +} + +impl StorageList + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + type Iterator = StoragePagedListIterator; + type Appender = StoragePagedListAppender; + + fn len() -> u64 { + as StorageKeyedList< + ((),), + Value, + >>::len(((),)) + } + + fn iter() -> Self::Iterator { + StoragePagedListIterator { inner: as StorageKeyedList>::iter(((),)) } + } + + fn drain() -> Self::Iterator { + StoragePagedListIterator { inner: as StorageKeyedList>::drain(((),)) } + } + + fn appender() -> Self::Appender { + StoragePagedListAppender { inner: as StorageKeyedList>::appender(((),)) } + } +} + +impl Iterator + for StoragePagedListIterator +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + type Item = Value; + + fn next(&mut self) -> Option { + self.inner.next() + } +} + +impl StorageListAppender + for StoragePagedListAppender +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + fn append(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + self.inner.append(item) + } +} + +// Needed for FRAME +impl crate::traits::StorageInfoTrait + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, + MaxPages: Get>, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: ::pallet_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxPages::get(), + max_size: 
Some(HeapSize::get()), + }] + } +} + +// Needed for FRAME +impl StoragePrefixedContainer + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + fn pallet_prefix() -> &'static [u8] { + // There is no difference between the prefices of a List and NMap. + StoragePagedNMapPrefix::::pallet_prefix() + } + + fn storage_prefix() -> &'static [u8] { + StoragePagedNMapPrefix::::storage_prefix() + } +} + +// Needed for FRAME +impl StorageEntryMetadataBuilder + for StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec + scale_info::StaticTypeInfo, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; + + let entry = StorageEntryMetadataIR { + name: Prefix::STORAGE_PREFIX, + modifier: crate::storage::types::StorageEntryModifierIR::Optional, // FAIL-CI + ty: StorageEntryTypeIR::Map { + hashers: vec![crate::Twox64Concat::METADATA], + key: scale_info::meta_type::(), + value: scale_info::meta_type::>(), + }, + default: None::.encode(), // FAIL-CI + docs, + }; + + entries.push(entry); + } +} + +// Needed for FRAME +impl Get + for KeyLenOf> +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + fn get() -> u32 { + super::paged_nmap::page_key::(((),), u32::MAX).len() as u32 + } +} + +// Test helpers: +#[cfg(feature = "std")] +#[allow(dead_code)] +impl StoragePagedList +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + /// Return the metadata struct of this list. + pub fn meta() -> StoragePagedNMapMeta { + // Use default here to not require a setup migration. + StoragePagedNMap::::meta(((),)) + } + + /// Return the elements of the list. + pub fn vec() -> Vec { + >::iter().collect() + } + + /// Return and remove the elements of the list. + pub fn take() -> Vec { + >::drain().collect() + } +} + +/// Prelude for (doc)tests. 
+#[cfg(feature = "std")] +#[allow(dead_code)] +pub(crate) mod mock { + pub use super::*; + pub use crate::storage::types::paged_nmap::*; + pub use codec::Compact; + pub use frame_support::parameter_types; + + pub fn page_key(page: PageIndex) -> Vec { + crate::storage::types::paged_nmap::page_key::(((),), page) + } + + pub fn meta_key() -> Vec { + crate::storage::types::paged_nmap::meta_key::(((),)) + } + + parameter_types! { + pub const HeapSize: u32 = 20; + pub const MaxPages: Option = Some(20); + } + + pub struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "FinalKeysNone" + } + const STORAGE_PREFIX: &'static str = "PagedList"; + } + pub struct Prefix2; + impl StorageInstance for Prefix2 { + fn pallet_prefix() -> &'static str { + "FinalKeysNone" + } + const STORAGE_PREFIX: &'static str = "PagedList"; + } + + pub type List = StoragePagedList; + pub type KeyGen = EmptyKeyGen; + pub type Key = ((),); + pub type ListCompact = StoragePagedList, HeapSize>; +} + +#[cfg(test)] +mod tests { + use super::mock::*; + use crate::StorageNoopGuard; + use codec::{Decode, Encode}; + use sp_core::twox_128; + use sp_io::TestExternalities; + + #[test] + fn append_works() { + TestExternalities::default().execute_with(|| { + List::append_many(0..1000); + assert_eq!(List::vec(), (0..1000).collect::>()); + }); + } + + /// Draining all works. + #[test] + fn simple_drain_works() { + TestExternalities::default().execute_with(|| { + let _g = StorageNoopGuard::default(); // All in all a No-Op + List::append_many(0..1000); + + assert_eq!(List::take(), (0..1000).collect::>()); + + assert_eq!(List::meta(), Default::default()); + + // all gone + assert_eq!(List::vec(), Vec::::new()); + // Need to delete the metadata manually. + StoragePagedNMapMeta::::delete(((),)); + }); + } + + /// Drain half of the elements and iterator the rest. 
+ #[test] + fn partial_drain_works() { + TestExternalities::default().execute_with(|| { + List::append_many(0..100); + + let vals = List::drain().take(50).collect::>(); + assert_eq!(vals, (0..50).collect::>()); + + let meta = List::meta(); + // Will switch over to `10/0`, but will in the next call. + assert_eq!((meta.first_page, meta.first_value_offset), (10, 0)); + assert_eq!(List::len(), 50); + + // 50 gone, 50 to go + assert_eq!(List::vec(), (50..100).collect::>()); + }); + } + + /// Draining, appending and iterating work together. + #[test] + fn drain_append_iter_works() { + TestExternalities::default().execute_with(|| { + for r in 1..=100 { + List::append_many(0..12); + List::append_many(0..12); + + let dropped = List::drain().take(12).collect::>(); + assert_eq!(dropped, (0..12).collect::>()); + + assert_eq!(List::vec(), (0..12).cycle().take(r * 12).collect::>()); + assert_eq!(List::len() as usize, r * 12); + } + }); + } + + /// Pages are removed ASAP. + #[test] + fn drain_eager_page_removal() { + TestExternalities::default().execute_with(|| { + List::append_many(0..9); + + assert!(sp_io::storage::exists(&page_key::(0))); + assert!(sp_io::storage::exists(&page_key::(1))); + + assert_eq!(List::drain().take(5).count(), 5); + // Page 0 is eagerly removed. + assert!(!sp_io::storage::exists(&page_key::(0))); + assert!(sp_io::storage::exists(&page_key::(1))); + }); + } + + /// Appending encodes pages as `Vec`. 
+ #[test] + fn append_storage_layout() { + TestExternalities::default().execute_with(|| { + List::append_many(0..9); + + let key = page_key::(0); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(vec.len(), 5, "First page contains 5"); + + let key = page_key::(1); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(vec.len(), 4, "Second page contains 4"); + + let meta = sp_io::storage::get(&meta_key::()).expect("Meta should be present"); + let meta: StoragePagedNMapMeta = + codec::Decode::decode(&mut &meta[..]).unwrap(); + assert_eq!(meta.first_page, 0); + assert_eq!(meta.first_value_offset, 0); + assert_eq!(meta.last_page, 1); + assert_eq!(meta.last_page_byte_offset, 16); + + let page = Page::::from_storage::(((),), 0, 0).unwrap(); + assert_eq!(page.index, 0); + assert_eq!(page.values.count(), 5); + + let page = Page::::from_storage::(((),), 1, 0).unwrap(); + assert_eq!(page.index, 1); + assert_eq!(page.values.count(), 4); + }); + } + + #[test] + fn final_prefix_correct() { + let got = StoragePagedNMapPrefix::::final_prefix(); + let want = [twox_128(b"FinalKeysNone"), twox_128(b"PagedList")].concat(); + + assert_eq!(want, got); + } + + #[test] + fn page_key_correct() { + let got = page_key::(0); + let pallet_prefix = StoragePagedNMapPrefix::::final_prefix(); + let want = (pallet_prefix, b"page", 0u32).encode(); + + assert_eq!(want.len(), 32 + 4 + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got, want); + } + + #[test] + fn meta_key_correct() { + let got = meta_key::(); + let pallet_prefix = StoragePagedNMapPrefix::::final_prefix(); + let want = (pallet_prefix, b"meta").encode(); + + assert_eq!(want.len(), 32 + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got, want); + } + + #[test] + fn peekable_drain_also_deletes() { + 
TestExternalities::default().execute_with(|| { + List::append_many(0..10); + + let mut iter = List::drain().peekable(); + assert_eq!(iter.peek(), Some(&0)); + // `peek` does remove one element... + assert_eq!(List::iter().count(), 9); + }); + } + + #[test] + fn heap_size_works() { + TestExternalities::default().execute_with(|| { + List::append_many(0..100); + ListCompact::append_many((0..100).map(|i| Compact(i))); + + // They should have the same number of items: + assert_eq!(List::meta().total_items, ListCompact::meta().total_items); + // But the compact variant should have more values per page: + }); + } +} diff --git a/substrate/frame/support/src/storage/types/paged_nmap.rs b/substrate/frame/support/src/storage/types/paged_nmap.rs new file mode 100644 index 000000000000..b56937434d9d --- /dev/null +++ b/substrate/frame/support/src/storage/types/paged_nmap.rs @@ -0,0 +1,792 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Paged storage n-map. + +// links are better than no links - even when they refer to private stuff. 
+#![allow(rustdoc::private_intra_doc_links)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(missing_docs)] +#![deny(unsafe_code)] + +use super::key::KeyGenerator; +use crate::{ + storage::{types::StorageEntryMetadataBuilder, EncodeLikeTuple, TupleToEncodedIter}, + traits::{StorageInfo, StorageInstance}, + StorageHasher, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec}; +use core::marker::PhantomData; +use frame_support::{ + defensive, storage::StoragePrefixedContainer, traits::Get, CloneNoBound, DebugNoBound, + DefaultNoBound, EqNoBound, PartialEqNoBound, +}; +use sp_metadata_ir::{StorageEntryMetadataIR, StorageEntryTypeIR}; +use sp_runtime::traits::Saturating; +use sp_std::{iter::Skip as SkipIter, prelude::*}; + +pub type PageIndex = u32; +pub type ValueIndex = u32; + +/// A paginated storage list. +/// +/// # Motivation +/// +/// This type replaces `StorageValue>` in situations where only iteration and appending is +/// needed. There are a few places where this is the case. A paginated structure reduces the memory +/// usage when a storage transactions needs to be rolled back. The main motivation is therefore a +/// reduction of runtime memory on storage transaction rollback. Should be configured such that the +/// size of a page is about 64KiB. This can only be ensured when `V` implements `MaxEncodedLen`. +/// +/// # Implementation +/// +/// The metadata of this struct is stored in [`StoragePagedNMapMeta`]. The data is stored in +/// [`Page`]s. +/// +/// Each [`Page`] holds at most `HeapSize` values in its `values` vector. The last page is +/// the only one that could have less than `HeapSize` values. +/// **Iteration** happens by starting +/// at [`first_page`][StoragePagedNMapMeta::first_page]/ +/// [`first_value_offset`][StoragePagedNMapMeta::first_value_offset] and incrementing these indices +/// as long as there are elements in the page and there are pages in storage. All elements of a page +/// are loaded once a page is read from storage. 
Iteration then happens on the cached elements. This +/// reduces the number of storage `read` calls on the overlay. **Appending** to the list happens by +/// appending to the last page by utilizing [`sp_io::storage::append`]. It allows to directly extend +/// the elements of `values` vector of the page without loading the whole vector from storage. A new +/// page is instantiated once [`Page::next`] overflows `HeapSize`. Its vector will also be +/// created through [`sp_io::storage::append`]. **Draining** advances the internal indices identical +/// to Iteration. It additionally persists the increments to storage and thereby 'drains' elements. +/// Completely drained pages are deleted from storage. +/// +/// # Further Observations +/// +/// - The encoded layout of a page is exactly its [`Page::values`]. The [`Page::next`] offset is +/// stored in the [`StoragePagedNMapMeta`] instead. There is no particular reason for this, +/// besides having all management state handy in one location. +/// - The PoV complexity of iterating compared to a `StorageValue>` is improved for +/// "shortish" iterations and worse for total iteration. The append complexity is identical in the +/// asymptotic case when using an `Appender`, and worse in all. For example when appending just +/// one value. +/// - It does incur a read overhead on the host side as compared to a `StorageValue>`. +#[derive(Default)] +pub struct StoragePagedNMap { + /// Phantom data. + pub _phantom: PhantomData<(Prefix, Key, Value, HeapSize, MaxPages)>, +} + +// FAIL-CI: TODO add test for Value MEL bound to be <= HeapSize + +/// The state of a [`StoragePagedNMap`]. +/// +/// This struct doubles as [`frame_support::storage::StorageList::Appender`]. +#[derive( + Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, DebugNoBound, DefaultNoBound, +)] +pub struct StoragePagedNMapMeta { + /// The first page that could contain a value. + /// + /// Can be >0 when pages were deleted. 
+ pub first_page: PageIndex, + /// The first index inside `first_page` that could contain a value. + /// + /// Can be >0 when values were deleted. + pub first_value_offset: ValueIndex, + + /// The last page that could contain data. + /// + /// Appending starts at this page index. + pub last_page: PageIndex, + /// The last value inside `last_page` that could contain a value. + /// + /// Appending starts at this index. If the page does not hold a value at this index, then the + /// whole list is empty. The only case where this can happen is when both are `0`. + pub last_page_byte_offset: u32, + + /// The total number of items currently present in the list. + pub total_items: u64, + /// Phantom data. + pub _phantom: PhantomData<(Prefix, Key, KArg, Value, HeapSize, MaxPages)>, +} + +pub struct StoragePagedNMapAppender { + /// The inner metadata. + pub meta: StoragePagedNMapMeta, + /// The key that values will be appended to. + pub key: KArg, +} + +impl + frame_support::storage::StorageListAppender + for StoragePagedNMapAppender +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + fn append(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + self.append_one(item); + } +} + +impl + StoragePagedNMapMeta +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + /// Read the metadata from storage. + pub fn from_storage(key: KArg) -> Option { + let mk = Self::storage_key(key); + + sp_io::storage::get(&mk).and_then(|raw| Self::decode(&mut &raw[..]).ok()) + } + + /// The key under which the metadata is stored. + pub fn storage_key(key: KArg) -> Vec { + meta_key::(key) + } + + /// Write the metadata to storage. 
+ pub fn store(&self, k: KArg) { + let key = Self::storage_key(k); + self.using_encoded(|enc| sp_io::storage::set(&key, enc)); + } + + /// Reset the metadata to default and delete it from storage. + pub fn reset(&mut self, key: KArg) { + *self = Default::default(); + Self::delete(key); + } + + /// Delete the metadata from storage. + pub fn delete(key: KArg) { + sp_io::storage::clear(&Self::storage_key(key)); + } +} + +impl + StoragePagedNMapAppender +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + pub fn append_one(&mut self, item: EncodeLikeValue) + where + EncodeLikeValue: EncodeLike, + { + let enc_size = item.encoded_size() as u32; + if (self.meta.last_page_byte_offset.saturating_add(enc_size)) > HeapSize::get() { + self.meta.last_page.saturating_inc(); + self.meta.last_page_byte_offset = 0; + } + let pk = page_key::(self.key.clone(), self.meta.last_page); + self.meta.last_page_byte_offset.saturating_accrue(enc_size); + self.meta.total_items.saturating_inc(); + sp_io::storage::append(&pk, item.encode()); + self.meta.store(self.key.clone()); + } +} + +/// A page that was decoded from storage and caches its values. +pub struct Page { + /// The index of the page. + pub(crate) index: PageIndex, + /// The remaining values of the page, to be drained by [`Page::next`]. + pub(crate) values: SkipIter>, + + pub(crate) key: KArg, + _phantom: PhantomData, +} + +impl< + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + V: FullCodec, + > Page +{ + /// Read the page with `index` from storage and assume the first value at `value_index`. + pub fn from_storage( + k: KArg, + index: PageIndex, + value_index: ValueIndex, + ) -> Option { + let key = page_key::(k.clone(), index); + let values = sp_io::storage::get(&key) + .and_then(|raw| sp_std::vec::Vec::::decode(&mut &raw[..]).ok())?; + if values.is_empty() { + // Dont create empty pages. 
+ return None + } + let values = values.into_iter().skip(value_index as usize); + + Some(Self { index, values, key: k, _phantom: PhantomData }) + } + + /// Whether no more values can be read from this page. + pub fn eof(&self) -> bool { + self.values.len() == 0 + } + + /// Delete this page from storage. + pub fn delete(&self) { + delete_page::(self.key.clone(), self.index); + } +} + +/// Delete a page with `index` from storage. +// Does not live under `Page` since it does not require the `Value` generic. +pub(crate) fn delete_page< + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, +>( + key: KArg, + index: PageIndex, +) { + let key = page_key::(key, index); + sp_io::storage::clear(&key); +} + +/// Storage key of a page with `index`. +// Does not live under `Page` since it does not require the `Value` generic. +pub(crate) fn page_key< + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, +>( + key: KArg, + index: PageIndex, +) -> Vec { + let k1 = StoragePagedNMapPrefix::::final_prefix(); + let k2 = Key::final_key(key); + + [&k1[..], &k2[..], b"page", &index.encode()[..]].concat() +} + +pub(crate) fn meta_key< + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, +>( + key: KArg, +) -> Vec { + let k1 = StoragePagedNMapPrefix::::final_prefix(); + let k2 = Key::final_key(key); + + [&k1[..], &k2[..], b"meta"].concat() +} + +impl Iterator for Page { + type Item = V; + + fn next(&mut self) -> Option { + self.values.next() + } +} + +/// Iterates over values of a [`StoragePagedNMap`]. +/// +/// Can optionally drain the iterated values. +pub struct StoragePagedNMapIterator { + // Design: we put the Page into the iterator to have fewer storage look-ups. Yes, these + // look-ups would be cached anyway, but bugging the overlay on each `.next` call still seems + // like a poor trade-off than caching it in the iterator directly. 
Iterating and modifying is + // not allowed at the same time anyway, just like with maps. Note: if Page is empty then + // the iterator did not find any data upon setup or ran out of pages. + page: Option>, + /// Whether all visited pages should be removed. + drain: bool, + /// The cached metadata. + meta: StoragePagedNMapMeta, + /// The key that we are iterating on. + key: KArg, +} + +impl + StoragePagedNMapIterator +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + /// Read self from the storage. + pub fn from_meta( + meta: StoragePagedNMapMeta, + key: KArg, + drain: bool, + ) -> Self { + let page = Page::::from_storage::( + key.clone(), + meta.first_page, + meta.first_value_offset, + ); + Self { page, drain, meta, key } + } +} + +impl Iterator + for StoragePagedNMapIterator +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + type Item = Value; + + fn next(&mut self) -> Option { + let page = self.page.as_mut()?; + let value = match page.next() { + Some(value) => value, + None => { + defensive!("There are no empty pages in storage; nuking the list"); + self.meta.reset(self.key.clone()); + self.page = None; + return None + }, + }; + + if page.eof() { + if self.drain { + page.delete::(); + self.meta.first_value_offset = 0; + self.meta.first_page.saturating_inc(); + self.meta.total_items.saturating_dec(); + } + + debug_assert!(!self.drain || self.meta.first_page == page.index + 1); + self.page = + Page::from_storage::(self.key.clone(), page.index.saturating_add(1), 0); + if self.drain { + if self.page.is_none() { + self.meta.reset(self.key.clone()); + } else { + self.meta.store(self.key.clone()); + } + } + } else { + if self.drain { + self.meta.first_value_offset.saturating_inc(); + self.meta.total_items.saturating_dec(); + self.meta.store(self.key.clone()); + } + } + 
Some(value) + } +} + +impl + frame_support::storage::StorageKeyedList + for StoragePagedNMap +where + Prefix: StorageInstance, + Key: KeyGenerator, + KArg: EncodeLikeTuple + TupleToEncodedIter + Clone, + Value: FullCodec, + HeapSize: Get, +{ + type Iterator = StoragePagedNMapIterator; + type Appender = StoragePagedNMapAppender; + + fn len(key: KArg) -> u64 { + Self::meta(key).total_items + } + + fn iter(key: KArg) -> Self::Iterator { + StoragePagedNMapIterator::from_meta(Self::meta(key.clone()), key, false) + } + + fn drain(key: KArg) -> Self::Iterator { + StoragePagedNMapIterator::from_meta(Self::meta(key.clone()), key, true) + } + + fn appender(key: KArg) -> Self::Appender { + Self::appender(key) + } +} + +impl + StoragePagedNMap +where + Prefix: StorageInstance, + Key: KeyGenerator, + Value: FullCodec, + HeapSize: Get, +{ + /// Provides a fast append iterator. + /// + /// The list should not be modified while appending. Also don't call it recursively. + fn appender + TupleToEncodedIter + Clone>( + key: KArg, + ) -> StoragePagedNMapAppender { + StoragePagedNMapAppender { meta: Self::meta(key.clone()), key } + } + + /// Return the metadata of the map. + pub fn meta + TupleToEncodedIter + Clone>( + key: KArg, + ) -> StoragePagedNMapMeta { + // Use default here to not require a setup migration. + StoragePagedNMapMeta::from_storage(key).unwrap_or_default() + } + + /// Return the elements of the map as a vector. + #[cfg(feature = "std")] + pub fn vec + TupleToEncodedIter + Clone>( + key: KArg, + ) -> Vec { + >::iter(key).collect() + } + + /// Return and remove the elements of the map as a vector. + #[cfg(feature = "std")] + pub fn take + TupleToEncodedIter + Clone>( + key: KArg, + ) -> Vec { + >::drain(key).collect() + } +} + +/// Provides the final prefix for a [`StoragePagedNMap`]. +/// +/// It solely exists so that when re-using it from the iterator and meta struct, none of the un-used +/// generics bleed through. 
Otherwise when only having the `StoragePrefixedContainer` implementation +/// on the list directly, the iterator and metadata need to muster *all* generics, even the ones +/// that are completely useless for prefix calculation. +pub(crate) struct StoragePagedNMapPrefix(PhantomData); + +impl frame_support::storage::StoragePrefixedContainer for StoragePagedNMapPrefix +where + Prefix: StorageInstance, +{ + fn pallet_prefix() -> &'static [u8] { + Prefix::pallet_prefix().as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + Prefix::STORAGE_PREFIX.as_bytes() + } +} + +impl frame_support::storage::StoragePrefixedContainer + for StoragePagedNMap +where + Prefix: StorageInstance, + Value: FullCodec, + HeapSize: Get, +{ + fn pallet_prefix() -> &'static [u8] { + StoragePagedNMapPrefix::::pallet_prefix() + } + + fn storage_prefix() -> &'static [u8] { + StoragePagedNMapPrefix::::storage_prefix() + } +} + +// Needed for FRAME +impl crate::traits::StorageInfoTrait + for StoragePagedNMap +where + Prefix: StorageInstance, + Value: FullCodec + codec::MaxEncodedLen, + HeapSize: Get, + MaxPages: Get>, +{ + fn storage_info() -> Vec { + vec![StorageInfo { + pallet_name: Self::pallet_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxPages::get(), + max_size: Some(HeapSize::get()), + }] + } +} + +// Needed for FRAME +impl StorageEntryMetadataBuilder + for StoragePagedNMap +where + Prefix: StorageInstance, + Value: FullCodec + scale_info::StaticTypeInfo, +{ + fn build_metadata(docs: Vec<&'static str>, entries: &mut Vec) { + let docs = if cfg!(feature = "no-metadata-docs") { vec![] } else { docs }; + + let entry = StorageEntryMetadataIR { + name: Prefix::STORAGE_PREFIX, + modifier: crate::storage::types::StorageEntryModifierIR::Optional, // FAIL-CI + ty: StorageEntryTypeIR::Map { + hashers: vec![crate::Twox64Concat::METADATA], + key: scale_info::meta_type::(), + value: scale_info::meta_type::>(), + }, + 
default: None::.encode(), // FAIL-CI + docs, + }; + + entries.push(entry); + } +} + +// Needed for FRAME +/*impl Get + for KeyLenOf> +where + Prefix: StorageInstance, + HeapSize: Get, + MaxPages: Get>, +{ + fn get() -> u32 { + page_key::(&None, u32::MAX).len() as u32 + } +}*/ + +/// Prelude for (doc)tests. +#[cfg(feature = "std")] +#[allow(dead_code)] +pub(crate) mod mock { + pub use super::*; + pub use crate::pallet_prelude::NMapKey; + use codec::Compact; + pub use frame_support::{parameter_types, Blake2_128Concat}; + + parameter_types! { + pub const HeapSize: u32 = 20; + pub const MaxPages: Option = Some(20); + } + + pub struct Prefix; + impl StorageInstance for Prefix { + fn pallet_prefix() -> &'static str { + "FinalKeysNone" + } + const STORAGE_PREFIX: &'static str = "PagedMap"; + } + pub struct Prefix2; + impl StorageInstance for Prefix2 { + fn pallet_prefix() -> &'static str { + "FinalKeysNone" + } + const STORAGE_PREFIX: &'static str = "PagedMap2"; + } + + pub type NMap = StoragePagedNMap; + pub type KeyGen = (NMapKey,); + pub type Key = (u32,); + + pub type NMapCompact = + StoragePagedNMap,), Compact, HeapSize>; +} + +#[cfg(test)] +mod tests { + use super::mock::*; + use crate::storage::lists::StorageKeyedList; + use codec::Encode; + use sp_core::twox_128; + use sp_io::TestExternalities; + + #[test] + fn append_works() { + TestExternalities::default().execute_with(|| { + >::append_many((123,), 0..1000); + assert_eq!(NMap::vec((123,)), (0..1000).collect::>()); + }); + } + + /// Draining all works. + #[test] + fn simple_drain_works() { + TestExternalities::default().execute_with(|| { + let _g = crate::StorageNoopGuard::default(); // All in all a No-Op + NMap::append_many((123,), 0..1000); + + assert_eq!(NMap::take((123,)), (0..1000).collect::>()); + + assert_eq!(NMap::meta((123,)), Default::default()); + + // all gone + assert_eq!(NMap::vec((123,)), Vec::::new()); + // Need to delete the metadata manually. 
+ StoragePagedNMapMeta::,), (u32,), u32, (), ()>::delete((123,)); + }); + } + + /// Drain half of the elements and iterator the rest. + #[test] + fn partial_drain_works() { + TestExternalities::default().execute_with(|| { + NMap::append_many((123,), 0..100); + + let vals = NMap::drain((123,)).take(50).collect::>(); + assert_eq!(vals, (0..50).collect::>()); + + let meta = NMap::meta((123,)); + // Will switch over to `10/0`, but will in the next call. + assert_eq!((meta.first_page, meta.first_value_offset), (10, 0)); + assert_eq!(NMap::len((123,)), 50); + + // 50 gone, 50 to go + assert_eq!(NMap::vec((123,)), (50..100).collect::>()); + }); + } + + /// Draining, appending and iterating work together. + #[test] + fn drain_append_iter_works() { + TestExternalities::default().execute_with(|| { + for r in 1..=100 { + NMap::append_many((123,), 0..12); + NMap::append_many((123,), 0..12); + + let dropped = NMap::drain((123,)).take(12).collect::>(); + assert_eq!(dropped, (0..12).collect::>()); + + assert_eq!(NMap::vec((123,)), (0..12).cycle().take(r * 12).collect::>()); + assert_eq!(NMap::len((123,)) as usize, r * 12); + } + }); + } + + /// Pages are removed ASAP. + #[test] + fn drain_eager_page_removal() { + TestExternalities::default().execute_with(|| { + NMap::append_many((123,), 0..9); + + assert!(sp_io::storage::exists(&page_key::((123,), 0))); + assert!(sp_io::storage::exists(&page_key::((123,), 1))); + + assert_eq!(NMap::drain((123,)).take(5).count(), 5); + // Page 0 is eagerly removed. + assert!(!sp_io::storage::exists(&page_key::((123,), 0))); + assert!(sp_io::storage::exists(&page_key::((123,), 1))); + }); + } + + /// Appending encodes pages as `Vec`. 
+ #[test] + fn append_storage_layout() { + TestExternalities::default().execute_with(|| { + NMap::append_many((123,), 0..9); + + let key = page_key::((123,), 0); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(vec.len(), 5, "First page contains 5"); + + let key = page_key::((123,), 1); + let raw = sp_io::storage::get(&key).expect("Page should be present"); + let vec = Vec::::decode(&mut &raw[..]).unwrap(); + assert_eq!(vec.len(), 4, "Second page contains 4"); + + let meta = sp_io::storage::get(&meta_key::((123,))) + .expect("Meta should be present"); + let meta: StoragePagedNMapMeta = + Decode::decode(&mut &meta[..]).unwrap(); + assert_eq!(meta.first_page, 0); + assert_eq!(meta.first_value_offset, 0); + assert_eq!(meta.last_page, 1); + assert_eq!(meta.last_page_byte_offset, 16); + + let page = Page::::from_storage::((123,), 0, 0).unwrap(); + assert_eq!(page.index, 0); + assert_eq!(page.values.count(), 5); + + let page = Page::::from_storage::((123,), 1, 0).unwrap(); + assert_eq!(page.index, 1); + assert_eq!(page.values.count(), 4); + }); + } + + #[test] + fn final_prefix_correct() { + let got = StoragePagedNMapPrefix::::final_prefix(); + let want = [twox_128(b"FinalKeysNone"), twox_128(b"PagedMap")].concat(); + + assert_eq!(want, got); + } + + #[test] + fn page_key_correct() { + let got = page_key::((123,), 11); + let pallet_prefix = StoragePagedNMapPrefix::::final_prefix(); + let hashed_key = Blake2_128Concat::hash(&123u32.encode()); + let want = [&pallet_prefix[..], &hashed_key, b"page", 11u32.encode().as_slice()].concat(); + + assert_eq!(want.len(), 32 + (16 + 4) + 4 + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got, want); + } + + #[test] + fn meta_key_correct() { + let got = meta_key::((123,)); + let pallet_prefix = StoragePagedNMapPrefix::::final_prefix(); + let hashed_key = Blake2_128Concat::hash(&123u32.encode()); + let want = [&pallet_prefix[..], 
hashed_key.as_slice(), b"meta"].concat(); + + assert_eq!(want.len(), 32 + (16 + 4) + 4); + assert!(want.starts_with(&pallet_prefix[..])); + assert_eq!(got.len(), want.len()); + assert_eq!(got, want); + } + + #[test] + fn peekable_drain_also_deletes() { + TestExternalities::default().execute_with(|| { + NMap::append_many((123,), 0..10); + + let mut iter = NMap::drain((123,)).peekable(); + assert_eq!(iter.peek(), Some(&0)); + // `peek` does remove one element... + assert_eq!(NMap::iter((123,)).count(), 9); + }); + } + + #[test] + fn heap_size_works() { + use codec::Compact; + TestExternalities::default().execute_with(|| { + NMap::append_many((123,), 0..100); + // FAIL-CI add more prefix + NMapCompact::append_many((123,), (0..100).map(|i| Compact(i))); + + // They should have the same number of items: + assert_eq!(NMap::meta((123,)).total_items, NMapCompact::meta((123,)).total_items); // FAIL-CI check tracking + assert_eq!(NMap::meta((123,)).total_items, 100); + // But the compact variant should have more values per page: + }); + } +} diff --git a/substrate/frame/support/test/tests/final_keys.rs b/substrate/frame/support/test/tests/final_keys.rs index 64f56d520035..512d1b0e7526 100644 --- a/substrate/frame/support/test/tests/final_keys.rs +++ b/substrate/frame/support/test/tests/final_keys.rs @@ -19,7 +19,12 @@ use codec::Encode; use frame_support::{derive_impl, storage::unhashed, StoragePrefixedMap}; use frame_system::pallet_prelude::BlockNumberFor; -use sp_core::sr25519; +use frame_support::{ + pallet_prelude::*, + storage::{types::StoragePagedListMeta, StorageKeyedList, StoragePrefixedContainer}, +}; + +use sp_core::{sr25519, ConstU32}; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, @@ -32,7 +37,6 @@ use sp_runtime::{ #[frame_support::pallet] mod no_instance { use super::*; - use frame_support::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -73,6 +77,17 @@ mod no_instance { ValueQuery, >; + #[pallet::storage] + pub type 
PagedList = StoragePagedList<_, u32, ConstU32<40>>; + + #[pallet::storage] + pub type PagedNMap = StoragePagedNMap< + _, + (NMapKey, NMapKey), + u32, + ConstU32<40>, + >; + #[pallet::genesis_config] pub struct GenesisConfig { pub value: u32, @@ -105,7 +120,6 @@ mod no_instance { #[frame_support::pallet] mod instance { use super::*; - use frame_support::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(PhantomData<(T, I)>); @@ -149,6 +163,17 @@ mod instance { ValueQuery, >; + #[pallet::storage] + pub type PagedList, I: 'static = ()> = StoragePagedList<_, u32, ConstU32<40>>; + + #[pallet::storage] + pub type PagedNMap, I: 'static = ()> = StoragePagedNMap< + _, + (NMapKey, NMapKey), + u32, + ConstU32<40>, + >; + #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { pub value: u32, @@ -209,10 +234,11 @@ frame_support::construct_runtime!( } ); -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; + type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; @@ -258,6 +284,78 @@ fn final_keys_no_instance() { assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); }); + // The metadata key of a PagedList is correct. + TestExternalities::default().execute_with(|| { + >::append_many(0..123); + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"PagedList")].concat(); + k.extend(b"meta"); + assert_eq!( + unhashed::get::, u32, ()>>(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!(&k[..32], &>::final_prefix()); + }); + // The page key of a PagedList is correct. 
+ TestExternalities::default().execute_with(|| { + >::append_many(0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"PagedList")].concat(); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + }); + // The metadata key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... + >::append_many((key, key2), 0..123); + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"meta"); + assert_eq!( + unhashed::get::, u32, ()>>(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + }); + // The page key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... + >::append_many((key, key2), 0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + } + }); } #[test] @@ -293,6 +391,78 @@ fn final_keys_default_instance() { assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); }); + // The metadata key of a PagedList is correct. 
+ TestExternalities::default().execute_with(|| { + >::append_many(0..123); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"PagedList")].concat(); + k.extend(b"meta"); + assert_eq!( + unhashed::get::, u32, ()>>(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!(&k[..32], &>::final_prefix()); + }); + // The page key of a PagedList is correct. + TestExternalities::default().execute_with(|| { + >::append_many(0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"PagedList")].concat(); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + }); + // The metadata key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... + >::append_many((key, key2), 0..123); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"meta"); + assert_eq!( + unhashed::get::, u32, ()>>(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + }); + // The page key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... 
+ >::append_many((key, key2), 0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!(&k[..32], &>::final_prefix()); + } + } + }); } #[test] @@ -328,4 +498,89 @@ fn final_keys_instance_2() { assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); }); + // The metadata key of a PagedList is correct. + TestExternalities::default().execute_with(|| { + >::append_many(0..123); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"PagedList")].concat(); + k.extend(b"meta"); + assert_eq!( + unhashed::get::< + StoragePagedListMeta, u32, ()>, + >(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!(&k[..32], &>::final_prefix()); + }); + // The page key of a PagedList is correct. + TestExternalities::default().execute_with(|| { + >::append_many(0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"PagedList")].concat(); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + } + }); + // The metadata key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... 
+ >::append_many((key, key2), 0..123); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"meta"); + assert_eq!( + unhashed::get::< + StoragePagedListMeta, u32, ()>, + >(&k), + Some(StoragePagedListMeta { + total_items: 123, + last_page: 12, + last_page_byte_offset: 12, + ..Default::default() + }) + ); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + } + }); + // The page key of a PagedMap is correct. + TestExternalities::default().execute_with(|| { + for key in 0..10 { + let key2 = key * 123; // random... + >::append_many((key, key2), 0..400); + for page in 0u32..10 { + let items = page * 10; + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"PagedNMap")].concat(); + k.extend(key.using_encoded(blake2_128_concat)); + k.extend(key2.using_encoded(twox_64_concat)); + k.extend(b"page"); + k.extend(page.encode()); + assert_eq!( + unhashed::get_raw(&k), + Some((items..items + 10).collect::>().encode()) + ); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + } + } + }); } From 7301f480947fb4283c9c83319c3a50ec2a60f1f9 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 27 May 2024 19:22:38 +0200 Subject: [PATCH 2/2] Clippy Signed-off-by: Oliver Tale-Yazdi --- substrate/frame/paged-list/src/mock.rs | 6 +----- substrate/frame/paged-list/src/tests.rs | 4 ++-- substrate/frame/support/src/storage/lists.rs | 10 +++++----- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs index d014a703db68..5d019784eccd 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -20,11 +20,7 @@ #![cfg(feature = "std")] use crate::{Config, ListPrefix}; -use frame_support::{ - derive_impl, - storage::types::StoragePagedListMeta, - traits::{ConstU16, ConstU64}, -}; +use frame_support::{derive_impl, 
storage::types::StoragePagedListMeta, traits::ConstU64}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, diff --git a/substrate/frame/paged-list/src/tests.rs b/substrate/frame/paged-list/src/tests.rs index f1a2b3c7d923..07bf2d31f7b1 100644 --- a/substrate/frame/paged-list/src/tests.rs +++ b/substrate/frame/paged-list/src/tests.rs @@ -20,8 +20,8 @@ #![cfg(test)] -use crate::{mock::*, *}; -use frame_support::storage::{StorageList, StoragePrefixedContainer}; +use crate::mock::*; +use frame_support::storage::StorageList; #[docify::export] #[test] diff --git a/substrate/frame/support/src/storage/lists.rs b/substrate/frame/support/src/storage/lists.rs index 632b66ad0a34..f706eeadfc98 100644 --- a/substrate/frame/support/src/storage/lists.rs +++ b/substrate/frame/support/src/storage/lists.rs @@ -76,10 +76,10 @@ pub trait StorageKeyedList { /// Append iterator for fast append operations. type Appender: StorageListAppender; - /// Number of elements in the list. + /// Number of elements in the list under `key`. fn len(key: K) -> u64; - /// List the elements in append order. + /// Get the elements in append order. fn iter(key: K) -> Self::Iterator; /// Drain the elements in append order. @@ -94,7 +94,7 @@ pub trait StorageKeyedList { /// Append a single element. /// /// Should not be called repeatedly; use `append_many` instead. - /// Worst case linear `O(len)` with `len` being the number if elements in the list. + /// Worst case linear `O(len)` with `len` being the number of elements in the map. fn append_one(key: K, item: EncodeLikeValue) where EncodeLikeValue: EncodeLike, @@ -105,8 +105,8 @@ pub trait StorageKeyedList { /// Append many elements. /// /// Should not be called repeatedly; use `appender` instead. - /// Worst case linear `O(len + items.count())` with `len` beings the number if elements in the - /// list. + /// Worst case linear `O(len + items.count())` with `len` beings the number of elements in the + /// map. 
fn append_many(key: K, items: I) where EncodeLikeValue: EncodeLike,