diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index aa749ed68..f4108f111 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -73,7 +73,7 @@ jobs: strategy: matrix: rust: [msrv, latest] - test-protocol: [22] + test-protocol: [23] sys: - os: ubuntu-latest target: wasm32-unknown-unknown diff --git a/Makefile b/Makefile index cf585225d..02445c567 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,8 @@ test: test-opt: cargo hack --locked --each-feature test --profile test-opt -MIN_PROTOCOL := 22 -MAX_PROTOCOL := 22 +MIN_PROTOCOL := 23 +MAX_PROTOCOL := 23 test-all-protocols: for i in $$(seq $(MIN_PROTOCOL) $$(($(MAX_PROTOCOL) + 1))); do \ diff --git a/soroban-env-common/src/meta.rs b/soroban-env-common/src/meta.rs index f79d53854..e5e960e7d 100644 --- a/soroban-env-common/src/meta.rs +++ b/soroban-env-common/src/meta.rs @@ -49,7 +49,7 @@ pub const ENV_META_V0_SECTION_NAME: &str = "contractenvmetav0"; // protocol number for testing purposes. #[cfg(feature = "next")] soroban_env_macros::generate_env_meta_consts!( - ledger_protocol_version: 23, + ledger_protocol_version: 24, pre_release_version: 1, ); @@ -59,6 +59,6 @@ soroban_env_macros::generate_env_meta_consts!( // network protocol number. #[cfg(not(feature = "next"))] soroban_env_macros::generate_env_meta_consts!( - ledger_protocol_version: 22, + ledger_protocol_version: 23, pre_release_version: 0, ); diff --git a/soroban-env-host/src/host.rs b/soroban-env-host/src/host.rs index f64506d63..25577b925 100644 --- a/soroban-env-host/src/host.rs +++ b/soroban-env-host/src/host.rs @@ -82,12 +82,11 @@ pub struct CoverageScoreboard { pub vm_to_vm_calls: usize, } -// The soroban 22.x host only supports protocol 22 and later, having -// adopted a new version of wasmi with a new fuel metering system, it -// cannot accurately replay earlier contracts. Earlier protocols -// must run on Soroban 21.x or earlier. +// The soroban 23.x host only supports protocol 23 and later, having adopted a +// new module caching system. Earlier protocols must run on Soroban 22.x or +// earlier. -pub(crate) const MIN_LEDGER_PROTOCOL_VERSION: u32 = 22; +pub(crate) const MIN_LEDGER_PROTOCOL_VERSION: u32 = 23; #[derive(Clone, Default)] struct HostImpl { @@ -163,14 +162,6 @@ struct HostImpl { #[cfg(any(test, feature = "recording_mode"))] suppress_diagnostic_events: RefCell, - // This flag marks the call of `build_module_cache` that would happen - // in enforcing mode. In recording mode we need to use this flag to - // determine whether we need to rebuild module cache after the host - // invocation has been done. 
- #[doc(hidden)] - #[cfg(any(test, feature = "recording_mode"))] - need_to_build_module_cache: RefCell, - #[cfg(any(test, feature = "testutils"))] pub(crate) invocation_meter: RefCell, } @@ -324,14 +315,6 @@ impl_checked_borrow_helpers!( try_borrow_suppress_diagnostic_events_mut ); -#[cfg(any(test, feature = "recording_mode"))] -impl_checked_borrow_helpers!( - need_to_build_module_cache, - bool, - try_borrow_need_to_build_module_cache, - try_borrow_need_to_build_module_cache_mut -); - impl Debug for HostImpl { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "HostImpl(...)") @@ -380,16 +363,17 @@ impl Host { coverage_scoreboard: Default::default(), #[cfg(any(test, feature = "recording_mode"))] suppress_diagnostic_events: RefCell::new(false), - #[cfg(any(test, feature = "recording_mode"))] - need_to_build_module_cache: RefCell::new(false), #[cfg(any(test, feature = "testutils"))] invocation_meter: Default::default(), })) } + #[cfg(any(test, feature="testutils"))] + // This builds a module cache instance for just the contracts stored + // in the host's storage map, and is used only in testing. pub fn build_module_cache_if_needed(&self) -> Result<(), HostError> { if self.try_borrow_module_cache()?.is_none() { - let cache = ModuleCache::new(self)?; + let cache = ModuleCache::new_for_stored_contracts(self)?; *self.try_borrow_module_cache_mut()? = Some(cache); } Ok(()) @@ -397,18 +381,8 @@ impl Host { // Install a module cache from _outside_ the Host. Doing this is potentially // delicate: the cache must contain all contracts that will be run by the - // host, and will not be further populated during execution. This is - // only allowed if the cache is of "reusable" type, i.e. it was created - // using `ModuleCache::new_reusable`. + // host, and will not be further populated during execution. pub fn set_module_cache(&self, cache: ModuleCache) -> Result<(), HostError> { - if !cache.is_reusable() { - return Err(self.err( - ScErrorType::Context, - ScErrorCode::InternalError, - "module cache not reusable", - &[], - )); - } *self.try_borrow_module_cache_mut()? = Some(cache); Ok(()) } @@ -438,14 +412,14 @@ impl Host { } #[cfg(any(test, feature = "recording_mode"))] - pub fn clear_module_cache(&self) -> Result<(), HostError> { + pub fn forget_module_cache(&self) -> Result<(), HostError> { *self.try_borrow_module_cache_mut()? = None; Ok(()) } #[cfg(any(test, feature = "recording_mode"))] pub fn rebuild_module_cache(&self) -> Result<(), HostError> { - self.clear_module_cache()?; + self.forget_module_cache()?; self.build_module_cache_if_needed() } diff --git a/soroban-env-host/src/host/frame.rs b/soroban-env-host/src/host/frame.rs index 94d0e7607..7dea6caf6 100644 --- a/soroban-env-host/src/host/frame.rs +++ b/soroban-env-host/src/host/frame.rs @@ -217,19 +217,6 @@ impl Host { // recording auth mode. This is a no-op for the enforcing mode. self.try_borrow_authorization_manager()? .maybe_emulate_authentication(self)?; - // See explanation for this line in [crate::vm::Vm::parse_module] -- it exists - // to add-back module-parsing costs that were suppressed during the invocation. - if self.in_storage_recording_mode()? { - if *self.try_borrow_need_to_build_module_cache()? { - // Host function calls that upload Wasm and create contracts - // don't use the module cache and thus don't need to have it - // rebuilt. - self.rebuild_module_cache()?; - } - } - // Reset the flag for building the module cache. 
This is only relevant - // for tests that keep reusing the same host for several invocations. - *(self.try_borrow_need_to_build_module_cache_mut()?) = false; } let mut auth_snapshot = None; if let Some(rp) = orp { @@ -701,16 +688,6 @@ impl Host { } fn instantiate_vm(&self, id: &Hash, wasm_hash: &Hash) -> Result, HostError> { - #[cfg(any(test, feature = "recording_mode"))] - { - if !self.in_storage_recording_mode()? { - self.build_module_cache_if_needed()?; - } else { - *(self.try_borrow_need_to_build_module_cache_mut()?) = true; - } - } - #[cfg(not(any(test, feature = "recording_mode")))] - self.build_module_cache_if_needed()?; let contract_id = id.metered_clone(self)?; let parsed_module = if let Some(cache) = &*self.try_borrow_module_cache()? { // Check that storage thinks the entry exists before @@ -721,7 +698,7 @@ impl Host { .try_borrow_storage_mut()? .has_with_host(&wasm_key, self, None)? { - cache.get_module(self, wasm_hash)? + cache.get_module(wasm_hash)? } else { None } @@ -733,8 +710,7 @@ impl Host { }; // We can get here a few ways: // - // 1. We are running/replaying a protocol that has no - // module cache. + // 1. We are in simulation so don't have a module cache. // // 2. We have a module cache, but it somehow doesn't have // the module requested. This in turn has two @@ -745,8 +721,10 @@ impl Host { // // - User uploaded the wasm _in this transaction_ so we // didn't cache it when starting the transaction (and - // couldn't due to wasmi locking its engine while - // running). + // couldn't add it: additions use a shared wasmi engine + // that owns all the cache entries, and that engine is + // locked while we're running; uploads use a throwaway + // engine for validation purposes). // // 3. Even more pathological: the module cache was built, // and contained the module, but someone _removed_ the @@ -767,9 +745,7 @@ impl Host { #[cfg(any(test, feature = "recording_mode"))] // In recording mode: if a contract was present in the initial snapshot image, it is part of // the set of contracts that would have been built into a module cache in enforcing mode; - // we want to defer the cost of parsing those (simulating them as cache hits) and then charge - // once for each such contract the simulated-module-cache-build that happens at the end of - // the frame in [`Self::pop_context`]. + // we want to suppress the cost of parsing those (simulating them as cache hits). // // If a contract is _not_ in the initial snapshot image, it's because someone just uploaded // it during execution. Those would be cache misses in enforcing mode, and so it is right to @@ -783,7 +759,7 @@ impl Host { .get_snapshot_value(self, &contact_code_key)? 
.is_some() { - crate::vm::ModuleParseCostMode::PossiblyDeferredIfRecording + crate::vm::ModuleParseCostMode::SuppressedToEmulateCacheHit } else { crate::vm::ModuleParseCostMode::Normal } diff --git a/soroban-env-host/src/test/budget_metering.rs b/soroban-env-host/src/test/budget_metering.rs index d9bd4bd15..83b976c48 100644 --- a/soroban-env-host/src/test/budget_metering.rs +++ b/soroban-env-host/src/test/budget_metering.rs @@ -128,7 +128,7 @@ fn test_vm_fuel_metering() -> Result<(), HostError> { .test_budget(cpu_required, mem_required) .enable_model(ContractCostType::WasmInsnExec, 6, 0, 0, 0) .enable_model(ContractCostType::MemAlloc, 0, 0, 0, 1); - host.clear_module_cache()?; + host.forget_module_cache()?; host.call(id_obj, sym, args)?; host.with_budget(|budget| { assert_eq!(budget.get_cpu_insns_consumed()?, cpu_required); @@ -142,7 +142,7 @@ fn test_vm_fuel_metering() -> Result<(), HostError> { .test_budget(cpu_required, mem_required) .enable_model(ContractCostType::WasmInsnExec, 6, 0, 0, 0) .enable_model(ContractCostType::MemAlloc, 0, 0, 0, 1); - host.clear_module_cache()?; + host.forget_module_cache()?; let res = host.try_call(id_obj, sym, args); assert!(HostError::result_matches_err(res, budget_err)); host.with_budget(|budget| { @@ -157,7 +157,7 @@ fn test_vm_fuel_metering() -> Result<(), HostError> { .test_budget(cpu_required, mem_required) .enable_model(ContractCostType::WasmInsnExec, 6, 0, 0, 0) .enable_model(ContractCostType::MemAlloc, 0, 0, 0, 1); - host.clear_module_cache()?; + host.forget_module_cache()?; let res = host.try_call(id_obj, sym, args); assert!(HostError::result_matches_err(res, budget_err)); host.with_budget(|budget| { diff --git a/soroban-env-host/src/test/bytes.rs b/soroban-env-host/src/test/bytes.rs index 157733012..138e95d5b 100644 --- a/soroban-env-host/src/test/bytes.rs +++ b/soroban-env-host/src/test/bytes.rs @@ -494,7 +494,7 @@ fn instantiate_oversized_bytes_from_linear_memory() -> Result<(), HostError> { // constructing a big bytes will cause budget limit exceeded error let num_bytes: u32 = 16_000_000; let wasm_long = wasm::wasm_module_with_large_bytes_from_linear_memory(num_bytes, 7); - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_unlimited()?; let contract_id_obj2 = host.register_test_contract_wasm(&wasm_long.as_slice()); host.budget_ref().reset_default()?; diff --git a/soroban-env-host/src/test/hostile.rs b/soroban-env-host/src/test/hostile.rs index 61f914ba9..8952c7b1f 100644 --- a/soroban-env-host/src/test/hostile.rs +++ b/soroban-env-host/src/test/hostile.rs @@ -606,7 +606,7 @@ fn excessive_logging() -> Result<(), HostError> { // moderate logging { - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_limits(2_000_000, 500_000)?; let res = host.call( contract_id_obj, @@ -626,7 +626,7 @@ fn excessive_logging() -> Result<(), HostError> { // excessive logging { - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_limits(2_000_000, 500_000)?; let res = host.call( contract_id_obj, @@ -648,7 +648,7 @@ fn excessive_logging() -> Result<(), HostError> { // increasing the shadow budget should make everything happy again { - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_limits(2_000_000, 500_000)?; host.set_shadow_budget_limits(2_000_000, 1_000_000)?; let res = host.call( diff --git a/soroban-env-host/src/test/invocation.rs b/soroban-env-host/src/test/invocation.rs index ce843ac44..3f7cb43a0 100644 --- 
a/soroban-env-host/src/test/invocation.rs +++ b/soroban-env-host/src/test/invocation.rs @@ -218,7 +218,7 @@ fn contract_failure_with_debug_on_off_affects_no_metering() -> Result<(), HostEr let invoke_cross_contract_indirect_with_err = || -> Result<(u64, u64, u64, u64), HostError> { // try call -- add will trap, and add_with will trap, but we will get an error - host.rebuild_module_cache()?; + host.build_module_cache_if_needed()?; host.as_budget().reset_default()?; let res = host.try_call(id0_obj, sym, args); HostError::result_matches_err( diff --git a/soroban-env-host/src/test/lifecycle.rs b/soroban-env-host/src/test/lifecycle.rs index b06454a7c..4a2ef019b 100644 --- a/soroban-env-host/src/test/lifecycle.rs +++ b/soroban-env-host/src/test/lifecycle.rs @@ -1093,7 +1093,7 @@ mod cap_54_55_56 { let wasm = get_contract_wasm_ref(&host, contract_id); let module_cache = host.try_borrow_module_cache()?; if let Some(module_cache) = &*module_cache { - assert!(module_cache.get_module(&*host, &wasm).is_ok()); + assert!(module_cache.get_module(&wasm).is_ok()); } else { panic!("expected module cache"); } @@ -1409,7 +1409,7 @@ mod cap_54_55_56 { // Check that the module cache did not get populated with the new wasm. if let Some(module_cache) = &*host.try_borrow_module_cache()? { - assert!(module_cache.get_module(&*host, &wasm_hash)?.is_none()); + assert!(module_cache.get_module(&wasm_hash)?.is_none()); } else { panic!("expected module cache"); } diff --git a/soroban-env-host/src/test/map.rs b/soroban-env-host/src/test/map.rs index 217c2f08c..8ada0a5de 100644 --- a/soroban-env-host/src/test/map.rs +++ b/soroban-env-host/src/test/map.rs @@ -487,7 +487,7 @@ fn instantiate_oversized_map_from_linear_memory() -> Result<(), HostError> { let num_vals = 400_000; let wasm_long = wasm::wasm_module_with_large_map_from_linear_memory(num_vals, U32Val::from(7).to_val()); - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_unlimited()?; let contract_id_obj2 = host.register_test_contract_wasm(&wasm_long.as_slice()); host.budget_ref().reset_default()?; diff --git a/soroban-env-host/src/test/vec.rs b/soroban-env-host/src/test/vec.rs index 06cb85f51..db0c20be6 100644 --- a/soroban-env-host/src/test/vec.rs +++ b/soroban-env-host/src/test/vec.rs @@ -457,7 +457,7 @@ fn instantiate_oversized_vec_from_linear_memory() -> Result<(), HostError> { let num_vals = 1_100_000; let wasm_long = wasm::wasm_module_with_large_vector_from_linear_memory(num_vals, U32Val::from(7).to_val()); - host.clear_module_cache()?; + host.forget_module_cache()?; host.budget_ref().reset_unlimited()?; let contract_id_obj2 = host.register_test_contract_wasm(&wasm_long.as_slice()); host.budget_ref().reset_default()?; diff --git a/soroban-env-host/src/testutils.rs b/soroban-env-host/src/testutils.rs index d70ceb62a..c259efe11 100644 --- a/soroban-env-host/src/testutils.rs +++ b/soroban-env-host/src/testutils.rs @@ -1,3 +1,4 @@ +use crate::budget::AsBudget; use crate::e2e_invoke::ledger_entry_to_ledger_key; use crate::storage::EntryWithLiveUntil; use crate::ErrorHandler; @@ -311,6 +312,9 @@ impl Host { self.set_source_account(prev_account)?; } self.set_auth_manager(prev_auth_manager)?; + self.as_budget().with_shadow_mode(|| { + self.rebuild_module_cache() + }); Ok(contract_address) } diff --git a/soroban-env-host/src/vm.rs b/soroban-env-host/src/vm.rs index 4949f9f27..29f1581d2 100644 --- a/soroban-env-host/src/vm.rs +++ b/soroban-env-host/src/vm.rs @@ -127,15 +127,14 @@ impl Host { } } -// In one very narrow context -- when 
recording, and with a module cache -- we -// defer the cost of parsing a module until we pop a control frame. -// Unfortunately we have to thread this information from the call site to here. -// See comment below where this type is used. +// In one very narrow context -- when recording the invocation of a contract +// that was _not_ just uploaded, and therefore _should_ have had a cache hit -- +// we suppress the cost of parsing a module, to emulate the cache hit. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ModuleParseCostMode { Normal, #[cfg(any(test, feature = "recording_mode"))] - PossiblyDeferredIfRecording, + SuppressedToEmulateCacheHit, } impl Vm { @@ -196,7 +195,7 @@ impl Vm { } /// Instantiates a VM given the arguments provided in [`Self::new`], - /// or [`Self::new_from_module_cache`] + /// or [`Self::from_parsed_module`] fn instantiate( host: &Host, contract_id: Hash, @@ -297,41 +296,36 @@ impl Vm { ParsedModule::new_with_isolated_engine(host, wasm, cost_inputs) } - /// This method exists to support [crate::storage::FootprintMode::Recording] - /// when running in protocol versions that feature the [ModuleCache]. + /// This method exists to support simulation of transaction application, + /// a.k.a. [crate::storage::FootprintMode::Recording] mode. /// - /// There are two ways we can get to here: - /// - /// 1. When we're running in a protocol that doesn't support the - /// [ModuleCache] at all. In this case, we just parse the module and - /// charge for it as normal. - /// - /// 2. When we're in a protocol that _does_ support the [ModuleCache] but - /// are _also_ in [crate::storage::FootprintMode::Recording] mode and - /// _also_ being instantiated from [Host::call_contract_fn]. Then the - /// [ModuleCache] _did not get built_ during host setup (because we had - /// no footprint yet to buid the cache from), so our caller - /// [Host::call_contract_fn] sees no module cache, and so each call winds - /// up calling us here, reparsing each module as it's called, and then - /// throwing it away. - /// - /// When we are in case 2, we don't want to charge for all those reparses: - /// we want to charge only for the post-parse instantiations _as if_ we had - /// had the cache. The cache will actually be added in [Host::pop_context] - /// _after_ a top-level recording-mode invocation completes, by reading the - /// storage and parsing all the modules in it, in order to charge for - /// parsing each used module _once_ and thereby produce a mostly-correct - /// total cost. - /// - /// We still charge the reparses to the shadow budget, to avoid a DoS risk, - /// and we still charge the instantiations to the real budget, to behave the - /// same as if we had a cache. - /// - /// Finally, for those scratching their head about the overall structure: - /// all of this happens as a result of the "module cache" not being - /// especially cache-like (i.e. not being populated lazily, on-access). It's - /// populated all at once, up front, because wasmi does not allow adding - /// modules to an engine that's currently running. + /// With the arrival of long-lived module caches, we no longer charge for + /// parsing modules during the majority of _real_ executions, because wasm + /// modules are parsed and cached outside the lifecycle of the soroban host + /// altogether. + /// + /// But there are some wrinkles to this: + /// + /// 1. When a user uploads a new contract, the initial parse happens in a + /// throwaway VM, and is limited by the standard budget. 
We do this + /// simply to protect the VM from malicious input. + /// + /// 2. When a user uploads a contract _and invokes it in the same tx_, it + /// is not in the cache yet (the cache is locked during execution) so + /// the execution causes another fresh, charged-for parse. This is + /// correct. A contract is (and should be) cheaper to run after it's + /// been committed to the ledger and cached in the module cache. + /// + /// 3. When we are _simulating_ tx apply, we don't want to oblige the + /// simulation environment to always have a reusable fully-populated + /// module cache, so to simulate the "no charge" experience most + /// contract invocations get, we parse on demand and _suppress the + /// cost_ of parsing the module, directing it to the shadow budget + /// instead. That is what this function exists to accomplish. + /// + /// 4. Unless! If we are simulating the upload-and-immediately-invoke + /// scenario, then we want to _not_ suppress the charge, since it would + /// be charged in the real non-simulated version of the scenario also. #[cfg(any(test, feature = "recording_mode"))] fn parse_module( host: &Host, @@ -339,12 +333,12 @@ impl Vm { cost_inputs: VersionedContractCodeCostInputs, cost_mode: ModuleParseCostMode, ) -> Result, HostError> { - if cost_mode == ModuleParseCostMode::PossiblyDeferredIfRecording { - if host.in_storage_recording_mode()? { - return host.budget_ref().with_observable_shadow_mode(|| { - ParsedModule::new_with_isolated_engine(host, wasm, cost_inputs) - }); - } + // When we're recording, we _might_ want to suppress the cost of parsing + // if we're in case 3 above. We can tell by the cost_mode. + if cost_mode == ModuleParseCostMode::SuppressedToEmulateCacheHit { + return host.budget_ref().with_observable_shadow_mode(|| { + ParsedModule::new_with_isolated_engine(host, wasm, cost_inputs) + }); } ParsedModule::new_with_isolated_engine(host, wasm, cost_inputs) } diff --git a/soroban-env-host/src/vm/module_cache.rs b/soroban-env-host/src/vm/module_cache.rs index 1736ea018..65126fc52 100644 --- a/soroban-env-host/src/vm/module_cache.rs +++ b/soroban-env-host/src/vm/module_cache.rs @@ -1,17 +1,16 @@ -use super::{ - func_info::HOST_FUNCTIONS, - parsed_module::{CompilationContext, ParsedModule, VersionedContractCodeCostInputs}, -}; +use super::parsed_module::{CompilationContext, ParsedModule, VersionedContractCodeCostInputs}; use crate::{ - budget::{get_wasmi_config, AsBudget, Budget}, - host::metered_clone::{MeteredClone, MeteredContainer}, + budget::get_wasmi_config, + host::metered_clone::MeteredClone, xdr::{Hash, ScErrorCode, ScErrorType}, - Host, HostError, MeteredOrdMap, + Host, HostError, }; use std::{ - collections::{BTreeMap, BTreeSet}, + collections::BTreeMap, sync::{Arc, Mutex, MutexGuard}, }; +#[cfg(any(test, feature = "testutils"))] +use crate::budget::AsBudget; /// A [ModuleCache] is a cache of a set of Wasm modules that have been parsed /// but not yet instantiated, along with a shared and reusable [Engine] storing @@ -32,10 +31,10 @@ static_assertions::assert_impl_all!(ModuleCache: Send, Sync); // The module cache was originally designed as an immutable object // established at host creation time and never updated. In order to support -// longer-lived modules caches, we allow construction of unmetered, "reusable" -// module maps, that imply various changes: +// longer-lived modules caches it was changed to a new form that is +// a little unlike the rest of soroban, and works differently: // -// - Modules can be added post-construction. 
+// - Modules can be added post-construction, it's not immutable. // - Adding an existing module is a harmless no-op, not an error. // - The linkers are set to "maximal" mode to cover all possible imports. // - The cache easily scales to a large number of modules, unlike MeteredOrdMap. @@ -44,14 +43,11 @@ static_assertions::assert_impl_all!(ModuleCache: Send, Sync); // - The cache is mutable and shared among all copies, using a mutex. #[derive(Clone)] -enum ModuleCacheMap { - MeteredSingleUseMap(MeteredOrdMap, Budget>), - UnmeteredReusableMap(Arc>>>), -} +struct ModuleCacheMap(Arc>>>); impl Default for ModuleCacheMap { fn default() -> Self { - Self::MeteredSingleUseMap(MeteredOrdMap::new()) + Self(Arc::new(Mutex::new(BTreeMap::new()))) } } @@ -63,71 +59,38 @@ impl ModuleCacheMap { .map_err(|_| HostError::from((ScErrorType::Context, ScErrorCode::InternalError))) } - fn is_reusable(&self) -> bool { - matches!(self, Self::UnmeteredReusableMap(_)) - } - - fn contains_key(&self, key: &Hash, budget: &Budget) -> Result { - match self { - Self::MeteredSingleUseMap(map) => map.contains_key(key, budget), - Self::UnmeteredReusableMap(map) => Ok(Self::lock_map(map)?.contains_key(key)), - } + fn contains_key(&self, key: &Hash) -> Result { + Ok(Self::lock_map(&self.0)?.contains_key(key)) } - fn get(&self, key: &Hash, budget: &Budget) -> Result>, HostError> { - match self { - Self::MeteredSingleUseMap(map) => Ok(map.get(key, budget)?.map(|rc| rc.clone())), - Self::UnmeteredReusableMap(map) => { - Ok(Self::lock_map(map)?.get(key).map(|rc| rc.clone())) - } - } + fn get(&self, key: &Hash) -> Result>, HostError> { + Ok(Self::lock_map(&self.0)?.get(key).map(|rc| rc.clone())) } fn insert( - &mut self, + &self, key: Hash, value: Arc, - budget: &Budget, ) -> Result<(), HostError> { - match self { - Self::MeteredSingleUseMap(map) => { - *map = map.insert(key, value, budget)?; - } - Self::UnmeteredReusableMap(map) => { - Self::lock_map(map)?.insert(key, value); - } - } + Self::lock_map(&self.0)?.insert(key, value); Ok(()) } } impl ModuleCache { - pub fn new(host: &Host) -> Result { - let wasmi_config = get_wasmi_config(host.as_budget())?; - let wasmi_engine = wasmi::Engine::new(&wasmi_config); - let modules = ModuleCacheMap::MeteredSingleUseMap(MeteredOrdMap::new()); - let wasmi_linker = wasmi::Linker::new(&wasmi_engine); - let mut cache = Self { - wasmi_engine, - modules, - wasmi_linker, - }; - - // Now add the contracts and rebuild linkers restricted to them. 
+ #[cfg(any(test, feature="testutils"))] + pub fn new_for_stored_contracts(host: &Host) -> Result { + let mut cache = Self::new(host)?; cache.add_stored_contracts(host)?; - cache.wasmi_linker = cache.make_minimal_wasmi_linker_for_cached_modules(host)?; Ok(cache) } - pub fn new_reusable(context: &Ctx) -> Result { + pub fn new(context: &Ctx) -> Result { let wasmi_config = get_wasmi_config(context.as_budget())?; let wasmi_engine = wasmi::Engine::new(&wasmi_config); - - let modules = ModuleCacheMap::UnmeteredReusableMap(Arc::new(Mutex::new(BTreeMap::new()))); - + let modules = ModuleCacheMap::default(); let wasmi_linker = Host::make_maximal_wasmi_linker(context, &wasmi_engine)?; - Ok(Self { wasmi_engine, modules, @@ -135,28 +98,11 @@ impl ModuleCache { }) } - pub fn is_reusable(&self) -> bool { - self.modules.is_reusable() - } - + #[cfg(any(test, feature="testutils"))] pub fn add_stored_contracts(&mut self, host: &Host) -> Result<(), HostError> { use crate::xdr::{ContractCodeEntry, ContractCodeEntryExt, LedgerEntryData, LedgerKey}; let storage = host.try_borrow_storage()?; for (k, v) in storage.map.iter(host.as_budget())? { - // In recording mode we build the module cache *after* the contract invocation has - // finished. This means that if any new Wasm has been uploaded, then we will add it to - // the cache. However, in the 'real' flow we build the cache first, so any new Wasm - // upload won't be cached. That's why we should look at the storage in its initial - // state, which is conveniently provided by the recording mode snapshot. - #[cfg(any(test, feature = "recording_mode"))] - let init_value = if host.in_storage_recording_mode()? { - storage.get_snapshot_value(host, k)? - } else { - v.clone() - }; - #[cfg(any(test, feature = "recording_mode"))] - let v = &init_value; - if let LedgerKey::ContractCode(_) = &**k { if let Some((e, _)) = v { if let LedgerEntryData::ContractCode(ContractCodeEntry { code, hash, ext }) = @@ -168,7 +114,7 @@ impl ModuleCache { // the actual execution into a `ContractFunctionSet`. // They should never be called, so we do not have to go // as far as making a fake `ParsedModule` for them. - if cfg!(any(test, feature = "testutils")) && code.as_slice().is_empty() { + if code.as_slice().is_empty() { continue; } @@ -195,7 +141,7 @@ impl ModuleCache { } pub fn parse_and_cache_module_simple( - &mut self, + &self, context: &Ctx, curr_ledger_protocol: u32, wasm: &[u8], @@ -216,7 +162,7 @@ impl ModuleCache { } pub fn parse_and_cache_module( - &mut self, + &self, context: &Ctx, curr_ledger_protocol: u32, contract_id: &Hash, @@ -225,20 +171,9 @@ impl ModuleCache { ) -> Result<(), HostError> { if self .modules - .contains_key(contract_id, context.as_budget())? + .contains_key(contract_id)? 
{ - if self.modules.is_reusable() { - return Ok(()); - } else { - return Err(context.error( - crate::Error::from_type_and_code( - ScErrorType::Context, - ScErrorCode::InternalError, - ), - "module cache already contains contract", - &[], - )); - } + return Ok(()); } let parsed_module = ParsedModule::new( context, @@ -249,71 +184,23 @@ impl ModuleCache { )?; self.modules.insert( contract_id.metered_clone(context.as_budget())?, - parsed_module, - context.as_budget(), + parsed_module )?; Ok(()) } - fn with_minimal_import_symbols( - &self, - host: &Host, - callback: impl FnOnce(&BTreeSet<(&str, &str)>) -> Result, - ) -> Result { - let mut import_symbols = BTreeSet::new(); - let ModuleCacheMap::MeteredSingleUseMap(modules) = &self.modules else { - return Err(host.err( - ScErrorType::Context, - ScErrorCode::InternalError, - "with_import_symbols called on non-MeteredSingleUseMap cache", - &[], - )); - }; - for module in modules.values(host.as_budget())? { - module.with_import_symbols(host, |module_symbols| { - for hf in HOST_FUNCTIONS { - let sym = (hf.mod_str, hf.fn_str); - if module_symbols.contains(&sym) { - import_symbols.insert(sym); - } - } - Ok(()) - })?; - } - // We approximate the cost of `BTreeSet` with the cost of initializng a - // `Vec` with the same elements, and we are doing it after the set has - // been created. The element count has been limited/charged during the - // parsing phase, so there is no DOS factor. We don't charge for - // insertion/lookups, since they should be cheap and number of - // operations on the set is limited (only used during `Linker` - // creation). - Vec::<(&str, &str)>::charge_bulk_init_cpy(import_symbols.len() as u64, host)?; - callback(&import_symbols) - } - - fn make_minimal_wasmi_linker_for_cached_modules( - &self, - host: &Host, - ) -> Result, HostError> { - self.with_minimal_import_symbols(host, |symbols| { - Host::make_minimal_wasmi_linker_for_symbols(host, &self.wasmi_engine, symbols) - }) - } - - pub fn contains_module( + pub fn contains_module( &self, wasm_hash: &Hash, - context: &Ctx, ) -> Result { - self.modules.contains_key(wasm_hash, context.as_budget()) + self.modules.contains_key(wasm_hash) } - pub fn get_module( + pub fn get_module( &self, - context: &Ctx, wasm_hash: &Hash, ) -> Result>, HostError> { - if let Some(m) = self.modules.get(wasm_hash, context.as_budget())? { + if let Some(m) = self.modules.get(wasm_hash)? { Ok(Some(m.clone())) } else { Ok(None) diff --git a/soroban-env-macros/src/lib.rs b/soroban-env-macros/src/lib.rs index 7d2aff15b..3a3e7b568 100644 --- a/soroban-env-macros/src/lib.rs +++ b/soroban-env-macros/src/lib.rs @@ -24,9 +24,9 @@ use crate::xdr::{Limits, ScEnvMetaEntry, ScEnvMetaEntryInterfaceVersion, WriteXd // `meta.rs`, since this is at the lower layer (`meta.rs` is compile-time // generated by routines here) #[cfg(not(feature = "next"))] -pub(crate) const LEDGER_PROTOCOL_VERSION: u32 = 22; -#[cfg(feature = "next")] pub(crate) const LEDGER_PROTOCOL_VERSION: u32 = 23; +#[cfg(feature = "next")] +pub(crate) const LEDGER_PROTOCOL_VERSION: u32 = 24; struct MetaInput { pub interface_version: ScEnvMetaEntryInterfaceVersion,
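Taken together, the hunks above replace the per-host, metered, build-once module cache with a shared, mutex-guarded, reusable cache that an embedder builds outside the Host and installs via set_module_cache; build_module_cache_if_needed and ModuleCache::new_for_stored_contracts remain only as test/testutils-gated conveniences, and recording mode now emulates cache hits by routing parse costs to the shadow budget rather than rebuilding the cache in pop_context. The sketch below illustrates the embedder-side flow this implies. It is an illustration rather than code from this change: it assumes ModuleCache is importable from soroban_env_host (the exact re-export path may differ), that Host satisfies the CompilationContext bound taken by ModuleCache::new and parse_and_cache_module_simple, and that the helper name and parameters are hypothetical.

    use soroban_env_host::{Host, HostError, ModuleCache};

    // Hypothetical helper: build one long-lived cache from known contract Wasms,
    // then hand it to a Host before invoking anything on it.
    fn invoke_with_prebuilt_cache(
        host: &Host,
        ledger_protocol: u32,
        contract_wasms: &[Vec<u8>],
    ) -> Result<(), HostError> {
        // An empty, reusable cache: entries share one wasmi engine and a "maximal"
        // linker, so any contract added later can be instantiated from it.
        let cache = ModuleCache::new(host)?;
        for wasm in contract_wasms {
            // Parsing is charged to the supplied context's budget; re-adding a
            // module that is already cached is a harmless no-op, not an error.
            cache.parse_and_cache_module_simple(host, ledger_protocol, wasm)?;
        }
        // With the reusable/single-use distinction gone, set_module_cache accepts
        // any cache; it must already hold every contract the host will run, since
        // it is not populated further during execution.
        host.set_module_cache(cache)?;
        // ... invoke contracts on `host`: cached modules skip the per-invocation
        // parse cost and only pay for instantiation ...
        Ok(())
    }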