From 55e0d2aeb677caf85e512a120d7b9b47e6d512f7 Mon Sep 17 00:00:00 2001 From: edu-stx <116000646+edu-stx@users.noreply.github.com> Date: Sun, 8 Jan 2023 23:31:14 +0000 Subject: [PATCH 001/158] docs: fix pox-2.clar typos --- src/chainstate/stacks/boot/pox-2.clar | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-2.clar b/src/chainstate/stacks/boot/pox-2.clar index e07addb450..8e56084a74 100644 --- a/src/chainstate/stacks/boot/pox-2.clar +++ b/src/chainstate/stacks/boot/pox-2.clar @@ -346,7 +346,7 @@ (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) (map-set stacking-state { stacker: moved-stacker } (merge moved-state { reward-set-indexes: update-list }))) - ;; otherwise, we dont need to update stacking-state after move + ;; otherwise, we don't need to update stacking-state after move true)) ;; if not moving, just noop true) @@ -636,7 +636,7 @@ ;; to issue the stacking lock. ;; The caller specifies: ;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock -;; * until-burn-ht: an optional burn height at which this delegation expiration +;; * until-burn-ht: an optional burn height at which this delegation expires ;; * pox-addr: an optional address to which any rewards *must* be sent (define-public (delegate-stx (amount-ustx uint) (delegate-to principal) @@ -765,7 +765,7 @@ (let ((amount-ustx (get stacked-amount partial-stacked)) ;; reward-cycle must point to an existing record in reward-cycle-total-stacked - ;; infallable; getting something from partial-stacked-by-cycle succeeded so this must succeed + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list (existing-entry (unwrap! (map-get? 
reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) @@ -1279,7 +1279,7 @@ (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) ) -;; How any uSTX have voted to reject PoX in a given reward cycle? +;; How many uSTX have voted to reject PoX in a given reward cycle? ;; *New in Stacks 2.1* (define-read-only (get-total-pox-rejection (reward-cycle uint)) (match (map-get? stacking-rejection { reward-cycle: reward-cycle }) From e9b8ba533a8f14e8830a8b56bc08b2e10b6d0e5b Mon Sep 17 00:00:00 2001 From: bestmike007 Date: Fri, 27 Jan 2023 14:41:54 +0000 Subject: [PATCH 002/158] feat: support stacks 2.1 in mocknet controller Signed-off-by: bestmike007 --- .../src/burnchains/mocknet_controller.rs | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index af8f4b8c1d..330a0d9178 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -12,7 +12,9 @@ use stacks::chainstate::burn::operations::{ LeaderKeyRegisterOp, PreStxOp, StackStxOp, TransferStxOp, UserBurnSupportOp, }; use stacks::chainstate::burn::BlockSnapshot; -use stacks::core::{StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, STACKS_EPOCH_MAX}; +use stacks::core::{ + StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_1, STACKS_EPOCH_MAX, +}; use stacks::types::chainstate::{BurnchainHeaderHash, PoxId}; use stacks::util::get_epoch_time_secs; use stacks::util::hash::Sha256Sum; @@ -99,13 +101,22 @@ impl BurnchainController for MocknetController { fn get_stacks_epochs(&self) -> Vec { match &self.config.burnchain.epochs { Some(epochs) => epochs.clone(), - None => vec![StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: STACKS_EPOCH_MAX, - block_limit: 
ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }], + None => vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 1, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ], } } From 64f1567daac38261e5505605324a0d3140e84025 Mon Sep 17 00:00:00 2001 From: bestmike007 Date: Fri, 27 Jan 2023 14:51:54 +0000 Subject: [PATCH 003/158] chore: update mocknet config to disable public ip address discovery Signed-off-by: bestmike007 --- testnet/stacks-node/conf/mocknet-miner-conf.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/conf/mocknet-miner-conf.toml b/testnet/stacks-node/conf/mocknet-miner-conf.toml index 31d8ea830a..71add782b1 100644 --- a/testnet/stacks-node/conf/mocknet-miner-conf.toml +++ b/testnet/stacks-node/conf/mocknet-miner-conf.toml @@ -8,6 +8,9 @@ miner = true wait_time_for_microblocks = 10000 use_test_genesis_chainstate = true +[connection_options] +public_ip_address = "127.0.0.1:20444" + [burnchain] chain = "bitcoin" mode = "mocknet" From 0174151f6cfdd16e572ec42c691fcc846640e145 Mon Sep 17 00:00:00 2001 From: bestmike007 Date: Wed, 8 Feb 2023 02:48:28 +0000 Subject: [PATCH 004/158] chore: transition to epoch 2.05 before 2.1 Signed-off-by: bestmike007 --- .../stacks-node/src/burnchains/mocknet_controller.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 330a0d9178..003770b6f8 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -13,7 +13,8 @@ use stacks::chainstate::burn::operations::{ }; use 
stacks::chainstate::burn::BlockSnapshot; use stacks::core::{ - StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_1, STACKS_EPOCH_MAX, + StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, STACKS_EPOCH_MAX, }; use stacks::types::chainstate::{BurnchainHeaderHash, PoxId}; use stacks::util::get_epoch_time_secs; @@ -110,8 +111,15 @@ impl BurnchainController for MocknetController { network_epoch: PEER_VERSION_EPOCH_2_0, }, StacksEpoch { - epoch_id: StacksEpochId::Epoch21, + epoch_id: StacksEpochId::Epoch2_05, start_height: 1, + end_height: 2, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 2, end_height: STACKS_EPOCH_MAX, block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, From 3b3e9205f3c17ec8865ae9f4ba24e974c390a429 Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Tue, 14 Feb 2023 20:12:50 +0100 Subject: [PATCH 005/158] docs: normalize bitwise operations names --- clarity/src/vm/docs/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index e4511d5e7c..938389caef 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -578,7 +578,7 @@ const XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Xor"), + name: None, snippet: "bit-xor ${1:expr-1} ${2:expr-2}", signature: "(bit-xor i1 i2...)", description: @@ -592,7 +592,7 @@ const BITWISE_XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_AND_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise And"), + name: None, snippet: "bit-and ${1:expr-1} ${2:expr-2}", signature: "(bit-and i1 i2...)", description: "Returns the result of bitwise and'ing a 
variable number of integer inputs.", @@ -605,7 +605,7 @@ const BITWISE_AND_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_OR_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Or"), + name: None, snippet: "bit-or ${1:expr-1} ${2:expr-2}", signature: "(bit-or i1 i2...)", description: @@ -618,7 +618,7 @@ const BITWISE_OR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_NOT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Not"), + name: None, snippet: "bit-not ${1:expr-1}", signature: "(bit-not i1)", description: "Returns the one's compliement (sometimes also called the bitwise compliment or not operator) of `i1`, effectively reversing the bits in `i1`. @@ -632,7 +632,7 @@ In other words, every bit that is `1` in ì1` will be `0` in the result. Conver }; const BITWISE_LEFT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Left Shift"), + name: None, snippet: "bit-shift-left ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-left i1 shamt)", description: "Shifts all the bits in `i1` to the left by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). @@ -652,7 +652,7 @@ should use `*`, `/`, and `pow` instead of the shift operators. }; const BITWISE_RIGHT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Right Shift"), + name: None, snippet: "bit-shift-right ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-right i1 shamt)", description: "Shifts all the bits in `i1` to the right by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). 
From 66a542f7fe8bb38bd34a3623e52523e9244938fb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 14 Feb 2023 22:23:15 -0500 Subject: [PATCH 006/158] feat: set mainline testnet and mainnet block heights for 2.1 --- src/core/mod.rs | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index ce83436c94..678455937e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -104,18 +104,14 @@ pub const BITCOIN_MAINNET_FIRST_BLOCK_HASH: &str = "0000000000000000000ab248c8e35c574514d052a83dbc12669e19bc43df486e"; pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; +pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; -// TODO: Pick a real height for Stacks 2.1. -pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 2_000_000; - -pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2_412_530; -pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1671825973; +pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; +pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = - "000000000000002a57f75a9bf78dde774da64899ff85ded8a9075f6b4959c959"; -pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_412_531; - -// TODO: Pick a real height for Stacks 2.1. 
-pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_412_532; + "000000000000010dd0863ec3d7a0bae17c1957ae1de9cbcdae8e77aad33e3b8c"; +pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; +pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -155,7 +151,6 @@ pub const POX_MAX_NUM_CYCLES: u8 = 12; pub const POX_TESTNET_STACKING_THRESHOLD_25: u128 = 8000; pub const POX_TESTNET_CYCLE_LENGTH: u128 = 1050; -// TODO: pick a real value for Stacks 2.1 pub const POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT as u32) + 1; pub const POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = From 6ac6ae6c606171e247e8a343ba0d769c67e7ba11 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 14 Feb 2023 22:23:33 -0500 Subject: [PATCH 007/158] fix: transfer_stx, not TransferStx --- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 859f210140..fed67995f4 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1702,7 +1702,7 @@ fn stx_transfer_btc_integration_test() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); - if !burnchain_op.contains_key("TransferStx") { + if !burnchain_op.contains_key("transfer_stx") { panic!("unexpected btc transaction type"); } found_btc_tx = true; From 2672e0ca2ff2eecd3ed74077f0c00ce3f4eceb7d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 16:06:51 -0500 Subject: [PATCH 008/158] chore: address 2.1 PR feedback --- clarity/src/vm/analysis/type_checker/v2_1/mod.rs | 1 - src/net/chat.rs | 7 ------- 2 files changed, 8 deletions(-) 
diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 9bf8cb9e04..c9bcd88eab 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . pub mod contexts; -//mod maps; pub mod natives; use stacks_common::types::StacksEpochId; diff --git a/src/net/chat.rs b/src/net/chat.rs index 1cd1f5b8c2..88337f40b7 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -1075,7 +1075,6 @@ impl ConversationP2P { /// Handle an inbound NAT-punch request -- just tell the peer what we think their IP/port are. /// No authentication from the peer is necessary. fn handle_natpunch_request(&self, chain_view: &BurnchainView, nonce: u32) -> StacksMessage { - // monitoring::increment_p2p_msg_nat_punch_request_received_counter(); monitoring::increment_msg_counter("p2p_nat_punch_request".to_string()); let natpunch_data = NatPunchData { @@ -1244,7 +1243,6 @@ impl ConversationP2P { chain_view: &BurnchainView, message: &mut StacksMessage, ) -> Result, net_error> { - // monitoring::increment_p2p_msg_ping_received_counter(); monitoring::increment_msg_counter("p2p_ping".to_string()); let ping_data = match message.payload { @@ -1268,7 +1266,6 @@ impl ConversationP2P { chain_view: &BurnchainView, preamble: &Preamble, ) -> Result { - // monitoring::increment_p2p_msg_get_neighbors_received_counter(); monitoring::increment_msg_counter("p2p_get_neighbors".to_string()); let epoch = self.get_current_epoch(chain_view.burn_block_height); @@ -1477,7 +1474,6 @@ impl ConversationP2P { preamble: &Preamble, get_blocks_inv: &GetBlocksInv, ) -> Result { - // monitoring::increment_p2p_msg_get_blocks_inv_received_counter(); monitoring::increment_msg_counter("p2p_get_blocks_inv".to_string()); let mut response = ConversationP2P::make_getblocksinv_response( @@ -2041,7 +2037,6 @@ impl ConversationP2P { // already have public key; match payload 
let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { - // monitoring::increment_p2p_msg_authenticated_handshake_received_counter(); monitoring::increment_msg_counter("p2p_authenticated_handshake".to_string()); debug!("{:?}: Got Handshake", &self); @@ -2113,7 +2108,6 @@ impl ConversationP2P { let solicited = self.connection.is_solicited(&msg); let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { - // monitoring::increment_p2p_msg_unauthenticated_handshake_received_counter(); monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); test_debug!("{:?}: Got unauthenticated Handshake", &self); let (reply_opt, handled) = @@ -2189,7 +2183,6 @@ impl ConversationP2P { nack_payload, ); - // monitoring::increment_p2p_msg_nack_sent_counter(); monitoring::increment_msg_counter("p2p_nack_sent".to_string()); // unauthenticated, so don't forward it (but do consume it, and do nack it) From c3975cd3269b4398de7399a93fc2ded02f94d4e0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 16:09:23 -0500 Subject: [PATCH 009/158] chore: Stacks 2.1 changelog --- CHANGELOG.md | 106 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 90 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c28a30c5a3..4214926a39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,32 +5,106 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [Unreleased - Stacks 2.1] +## [2.1] -This release will contain consensus-breaking changes. +This is a **consensus-breaking** release that introduces a _lot_ of new +functionality. 
Details on the how and why can be found in [SIP-015](https://github.com/stacksgov/sips/blob/feat/sip-015/sips/sip-015/sip-015-network-upgrade.md), +[SIP-018](https://github.com/MarvinJanssen/sips/blob/feat/signed-structured-data/sips/sip-018/sip-018-signed-structured-data.md), +and [SIP-20](https://github.com/obycode/sips/blob/bitwise-ops/sips/sip-020/sip-020-bitwise-ops.md). + +The changelog for this release is a high-level summary of these SIPs. ### Added -- Clarity function `stx-transfer?` now takes a 4th optional argument, which is a memo. -- Added a new parser which will be used to parse Clarity code beginning with 2.1, - resolving several bugs in the old parser and improving performance. -- Documentation will indicate explicitly which Clarity version introduced each - keyword or function. -- Clarity2 improvements to traits (see #3251 for details): +- There is a new `.pox-2` contract for implementing proof-of-transfer. This PoX + contract enables re-stacking while the user's STX are locked, and incrementing +the amount stacked on top of a locked batch of STX. +- The Clarity function `stx-account` has been added, which returns the account's + locked and unlocked balances. +- The Clarity functions `principal-destruct` and `principal-construct?` + functions have been added, which provide the means to convert between a +`principal` instance and the `buff`s and `string-ascii`s that constitute it. +- The Clarity function `get-burn-block-info?` has been extended to support + fetching the burnchain header hash of _any_ burnchain block starting from the +sortition height of the Stacks genesis block, and to support fetching the PoX +addresses and rewards paid by miners for a particular burnchain block height. +- The Clarity function `slice` has been added for obtaining a sub-sequence of a + `buff`, `string-ascii`, `string-utf8`, or `list`. +- Clarity functions for converting between `string-ascii`, `string-utf8`, + `uint`, and `int` have been added. 
+- Clarity functions for converting between big- and little-endian +`buff` representations of `int` and `uint` have been added. +- The Clarity function `stx-transfer-memo?` has been added, which behaves the + same as `stx-transfer?` but also takes a memo argument. +- The Clarity function `is-standard` has been added to identify whether or not a + `principal` instance is a standard or contract principal. +- Clarity functions have been added for converting an arbitrary Clarity type to + and from its canonical byte string representation. +- The Clarity function `replace-at?` has been added for replacing a single item + in a `list`, `string-ascii`, `string-utf8`, or `buff`. +- The Clarity global variable `tx-sponsor?` has been added, which evaluates to + the sponsor of the transaction if the transaction is sponsored. +- The Clarity global variable `chain-id` has been added, which evaluates to the + 4-byte chain ID of this Stacks network. +- The Clarity parser has been rewritten to be about 3x faster than the parser in + Stacks 2.05.x.x.x. +- Clarity trait semantics have been refined and made more explicit, so as to + avoid certain corner cases where a trait reference might be downgraded to a * Trait values can be passed to compatible sub-trait types * Traits can be embedded in compound types, e.g. `(optional )` * Traits can be assigned to a let-variable - Fixes to unexpected behavior in traits - * A trait with duplicate function names is now an error (#3214) - * Aliased trait names do not interfere with local trait definitions (#3215) + * A trait with duplicate function names is now an error + * Aliased trait names do not interfere with local trait definitions +`principal` in Clarity 1. +- The comparison functions `<`, `<=`, `>`, and `>=` now work on `string-ascii`, + `string-utf8`, and `buff` based on byte-by-byte comparison (note that this is +_not_ lexicographic comparison). 
+- It is now possible to call `delegate-stx` from a burnchain transaction, just + as it is for `stack-stx` and `transfer-stx`. -## Upcoming +### Changed -### Added -- Added prometheus output for "transactions in last block" (#3138). -- Added envrionement variable `STACKS_LOG_FORMAT_TIME` to set the time format - stacks-node uses for logging. - Example: `STACKS_LOG_FORMAT_TIME="%Y-%m-%d %H:%M:%S" cargo stacks-node` +- The `delegate-stx` function in `.pox-2` can be called while the user's STX are + locked. +- If a batch of STX is not enough to clinch even a single reward slot, then the + STX are automatically unlocked at the start of the reward cycle in which they +are rendered useless in this capacity. +- The PoX sunset has been removed. PoX rewards will continue in perpetuity. +- Support for segwit and taproot addresses (v0 and v1 witness programs) has been + added for Stacking. +- The Clarity function `get-block-info?` now supports querying a block's total + burnchain spend by miners who tried to mine it, the spend by the winner, and +the total block reward (coinbase plus transaction fees). +- A block's coinbase transaction may specify an alternative recipient principal, + which can be either a standard or contract principal. +- A smart contract transaction can specify which version of Clarity to use. If + no version is given, then the epoch-default version will be used (in Stacks +2.1, this is Clarity 2). +- The Stacks node now includes the number of PoX anchor blocks in its + fork-choice rules. The best Stacks fork is the fork that (1) is on the best +Bitcoin fork, (2) has the most PoX anchor blocks known, and (3) is the longest. +- On-burnchain operations -- `stack-stx`, `delegate-stx`, and `transfer-stx` -- + can take effect within six (6) burnchain blocks in which they are mined, +instead of one. +- Transaction fees are debited from accounts _before_ the transaction is + processed. 
+- All smart contract analysis errors are now treated as runtime errors, meaning + that smart contract transactions which don't pass analysis will still be mined +(so miners get paid for partially validating them). +- The default Clarity version is now 2. Users can opt for version 1 by using + the new smart contract transaction wire format and explicitly setting version +1. + +### Fixed + +- The authorization of a `contract-caller` in `.pox-2` for stacking will now + expire at the user-specified height, if given. +- The Clarity function `principal-of?` now works on mainnet. +- One or more late block-commits no longer result in the miner losing its + sortition weight. +- Documentation will indicate explicitly which Clarity version introduced each + keyword or function. ## [2.05.0.6.0] From b6ad3bcee9d58f1ad7eaaf27855c8b01a0bc8cfa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Feb 2023 17:06:28 -0500 Subject: [PATCH 010/158] fix: clean up changelog --- CHANGELOG.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4214926a39..d2864df84a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ the amount stacked on top of a locked batch of STX. - The Clarity functions `principal-destruct` and `principal-construct?` functions have been added, which provide the means to convert between a `principal` instance and the `buff`s and `string-ascii`s that constitute it. -- The Clarity function `get-burn-block-info?` has been extended to support +- The Clarity function `get-burn-block-info?` has been added to support fetching the burnchain header hash of _any_ burnchain block starting from the sortition height of the Stacks genesis block, and to support fetching the PoX addresses and rewards paid by miners for a particular burnchain block height. @@ -50,13 +50,13 @@ addresses and rewards paid by miners for a particular burnchain block height. Stacks 2.05.x.x.x. 
- Clarity trait semantics have been refined and made more explicit, so as to avoid certain corner cases where a trait reference might be downgraded to a +`principal` in Clarity 1. * Trait values can be passed to compatible sub-trait types * Traits can be embedded in compound types, e.g. `(optional )` * Traits can be assigned to a let-variable - Fixes to unexpected behavior in traits * A trait with duplicate function names is now an error * Aliased trait names do not interfere with local trait definitions -`principal` in Clarity 1. - The comparison functions `<`, `<=`, `>`, and `>=` now work on `string-ascii`, `string-utf8`, and `buff` based on byte-by-byte comparison (note that this is _not_ lexicographic comparison). @@ -94,7 +94,6 @@ instead of one. (so miners get paid for partially validating them). - The default Clarity version is now 2. Users can opt for version 1 by using the new smart contract transaction wire format and explicitly setting version -1. ### Fixed From bc9f3e50e74fcc6a79d5e15b91311b39f41cd17e Mon Sep 17 00:00:00 2001 From: Hugo Caillard <911307+hugocaillard@users.noreply.github.com> Date: Fri, 17 Feb 2023 14:43:13 +0100 Subject: [PATCH 011/158] docs: fix principal -construct -destruct api reference --- clarity/src/vm/docs/mod.rs | 185 +++++++++++++++++++------------------ 1 file changed, 93 insertions(+), 92 deletions(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 938389caef..ca1ed78666 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -320,7 +320,7 @@ Note: This function is only available starting with Stacks 2.1.", "#, }; -const principal_destruct_API: SimpleFunctionAPI = SimpleFunctionAPI { +const PRINCPIPAL_DESTRUCT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "principal-destruct? ${1:principal-address}", signature: "(principal-destruct? 
principal-address)", @@ -356,58 +356,6 @@ Note: This function is only available starting with Stacks 2.1.", "#, }; -const PRINCIPAL_CONSTRUCT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: None, - snippet: "principal-construct? ${1:version} ${2:pub-key-hash}", - signature: "(principal-construct? (buff 1) (buff 20) [(string-ascii 40)])", - description: "A principal value represents either a set of keys, or a smart contract. -The former, called a _standard principal_, -is encoded as a `(buff 1)` *version byte*, indicating the type of account -and the type of network that this principal can spend tokens on, -and a `(buff 20)` *public key hash*, characterizing the principal's unique identity. -The latter, a _contract principal_, is encoded as a standard principal concatenated with -a `(string-ascii 40)` *contract name* that identifies the code body. - -The `principal-construct?` function allows users to create either standard or contract principals, -depending on which form is used. To create a standard principal, -`principal-construct?` would be called with two arguments: it -takes as input a `(buff 1)` which encodes the principal address's -`version-byte`, a `(buff 20)` which encodes the principal address's `hash-bytes`. -To create a contract principal, `principal-construct?` would be called with -three arguments: the `(buff 1)` and `(buff 20)` to represent the standard principal -that created the contract, and a `(string-ascii 40)` which encodes the contract's name. -On success, this function returns either a standard principal or contract principal, -depending on whether or not the third `(string-ascii 40)` argument is given. - -This function returns a `Response`. On success, the `ok` value is a `Principal`. -The `err` value is a value tuple with the form `{ error_code: uint, value: (optional principal) }`. 
- -If the single-byte `version-byte` is in the valid range `0x00` to `0x1f`, but is not an appropriate -version byte for the current network, then the error will be `u0`, and `value` will contain -`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, -however, then the `value` will be `none`. - -If the `version-byte` is a `buff` of length 0, if the single-byte `version-byte` is a -value greater than `0x1f`, or the `hash-bytes` is a `buff` of length not equal to 20, then `error_code` -will be `u1` and `value` will be `None`. - -If a name is given, and the name is either an empty string or contains ASCII characters -that are not allowed in contract names, then `error_code` will be `u2`. - -Note: This function is only available starting with Stacks 2.1.", - example: r#" -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK) -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK.foo) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY)))) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY.foo)))) -(principal-construct? 0x 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a3) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x20 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "") ;; Returns (err (tuple (error_code u2) (value none))) -(principal-construct? 
0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo[") ;; Returns (err (tuple (error_code u2) (value none))) -"#, -}; - const STRING_TO_INT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "string-to-int? ${1:string}", @@ -893,7 +841,7 @@ fn make_for_simple_native( } else { panic!( "Attempted to auto-generate docs for non-simple native function: {:?}", - api.name + name ) } }; @@ -1818,6 +1766,59 @@ The `addrs` list contains the same PoX address values passed into the PoX smart " }; +const PRINCIPAL_CONSTRUCT_API: SpecialAPI = SpecialAPI { + input_type: "(buff 1) (buff 20) [(string-ascii 40)]", + output_type: "(response principal { error_code: uint, principal: (option principal) })", + snippet: "principal-construct? ${1:version} ${2:pub-key-hash}", + signature: "(principal-construct? (buff 1) (buff 20) [(string-ascii 40)])", + description: "A principal value represents either a set of keys, or a smart contract. +The former, called a _standard principal_, +is encoded as a `(buff 1)` *version byte*, indicating the type of account +and the type of network that this principal can spend tokens on, +and a `(buff 20)` *public key hash*, characterizing the principal's unique identity. +The latter, a _contract principal_, is encoded as a standard principal concatenated with +a `(string-ascii 40)` *contract name* that identifies the code body. + +The `principal-construct?` function allows users to create either standard or contract principals, +depending on which form is used. To create a standard principal, +`principal-construct?` would be called with two arguments: it +takes as input a `(buff 1)` which encodes the principal address's +`version-byte`, a `(buff 20)` which encodes the principal address's `hash-bytes`. 
+To create a contract principal, `principal-construct?` would be called with +three arguments: the `(buff 1)` and `(buff 20)` to represent the standard principal +that created the contract, and a `(string-ascii 40)` which encodes the contract's name. +On success, this function returns either a standard principal or contract principal, +depending on whether or not the third `(string-ascii 40)` argument is given. + +This function returns a `Response`. On success, the `ok` value is a `Principal`. +The `err` value is a value tuple with the form `{ error_code: uint, value: (optional principal) }`. + +If the single-byte `version-byte` is in the valid range `0x00` to `0x1f`, but is not an appropriate +version byte for the current network, then the error will be `u0`, and `value` will contain +`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, +however, then the `value` will be `none`. + +If the `version-byte` is a `buff` of length 0, if the single-byte `version-byte` is a +value greater than `0x1f`, or the `hash-bytes` is a `buff` of length not equal to 20, then `error_code` +will be `u1` and `value` will be `None`. + +If a name is given, and the name is either an empty string or contains ASCII characters +that are not allowed in contract names, then `error_code` will be `u2`. + +Note: This function is only available starting with Stacks 2.1.", + example: r#" +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK) +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK.foo) +(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY)))) +(principal-construct? 
0x16 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY.foo)))) +(principal-construct? 0x 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a3) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x20 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "") ;; Returns (err (tuple (error_code u2) (value none))) +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo[") ;; Returns (err (tuple (error_code u2) (value none))) +"#, +}; + const DEFINE_TOKEN_API: DefineAPI = DefineAPI { input_type: "TokenName, ", snippet: "define-fungible-token ${1:token-name} ${2:total-supply}", @@ -2415,35 +2416,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { use crate::vm::functions::NativeFunctions::*; let name = function.get_name(); match function { - Add => make_for_simple_native(&ADD_API, &Add, name), - ToUInt => make_for_simple_native(&TO_UINT_API, &ToUInt, name), - ToInt => make_for_simple_native(&TO_INT_API, &ToInt, name), - Subtract => make_for_simple_native(&SUB_API, &Subtract, name), - Multiply => make_for_simple_native(&MUL_API, &Multiply, name), - Divide => make_for_simple_native(&DIV_API, &Divide, name), - BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &BuffToIntLe, name), - BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &BuffToUIntLe, name), - BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &BuffToIntBe, name), - BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &BuffToUIntBe, name), - IsStandard => make_for_simple_native(&IS_STANDARD_API, &IsStandard, name), - PrincipalDestruct => make_for_simple_native(&principal_destruct_API, 
&IsStandard, name), - PrincipalConstruct => make_for_simple_native(&PRINCIPAL_CONSTRUCT_API, &IsStandard, name), - StringToInt => make_for_simple_native(&STRING_TO_INT_API, &StringToInt, name), - StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &StringToUInt, name), - IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &IntToAscii, name), - IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &IntToUtf8, name), - CmpGeq => make_for_simple_native(&GEQ_API, &CmpGeq, name), - CmpLeq => make_for_simple_native(&LEQ_API, &CmpLeq, name), - CmpLess => make_for_simple_native(&LESS_API, &CmpLess, name), - CmpGreater => make_for_simple_native(&GREATER_API, &CmpGreater, name), - Modulo => make_for_simple_native(&MOD_API, &Modulo, name), - Power => make_for_simple_native(&POW_API, &Power, name), - Sqrti => make_for_simple_native(&SQRTI_API, &Sqrti, name), - Log2 => make_for_simple_native(&LOG2_API, &Log2, name), - BitwiseXor => make_for_simple_native(&XOR_API, &BitwiseXor, name), - And => make_for_simple_native(&AND_API, &And, name), - Or => make_for_simple_native(&OR_API, &Or, name), - Not => make_for_simple_native(&NOT_API, &Not, name), + Add => make_for_simple_native(&ADD_API, &function, name), + ToUInt => make_for_simple_native(&TO_UINT_API, &function, name), + ToInt => make_for_simple_native(&TO_INT_API, &function, name), + Subtract => make_for_simple_native(&SUB_API, &function, name), + Multiply => make_for_simple_native(&MUL_API, &function, name), + Divide => make_for_simple_native(&DIV_API, &function, name), + BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name), + BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &function, name), + BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name), + BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &function, name), + IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name), + PrincipalDestruct => 
make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name), + PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function), + StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name), + StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name), + IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name), + IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name), + CmpGeq => make_for_simple_native(&GEQ_API, &function, name), + CmpLeq => make_for_simple_native(&LEQ_API, &function, name), + CmpLess => make_for_simple_native(&LESS_API, &function, name), + CmpGreater => make_for_simple_native(&GREATER_API, &function, name), + Modulo => make_for_simple_native(&MOD_API, &function, name), + Power => make_for_simple_native(&POW_API, &function, name), + Sqrti => make_for_simple_native(&SQRTI_API, &function, name), + Log2 => make_for_simple_native(&LOG2_API, &function, name), + BitwiseXor => make_for_simple_native(&XOR_API, &function, name), + And => make_for_simple_native(&AND_API, &function, name), + Or => make_for_simple_native(&OR_API, &function, name), + Not => make_for_simple_native(&NOT_API, &function, name), Equals => make_for_special(&EQUALS_API, function), If => make_for_special(&IF_API, function), Let => make_for_special(&LET_API, function), @@ -2507,20 +2508,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { BurnAsset => make_for_special(&BURN_ASSET, function), GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function), AtBlock => make_for_special(&AT_BLOCK, function), - GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &GetStxBalance, name), - StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &StxGetAccount, name), + GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name), + StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name), StxTransfer => make_for_special(&STX_TRANSFER, function), 
StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function), - StxBurn => make_for_simple_native(&STX_BURN, &StxBurn, name), + StxBurn => make_for_simple_native(&STX_BURN, &function, name), ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function), FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function), ReplaceAt => make_for_special(&REPLACE_AT, function), - BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &BitwiseXor2, name), - BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &BitwiseAnd, name), - BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &BitwiseOr, name), - BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &BitwiseNot, name), - BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &BitwiseLShift, name), - BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &BitwiseRShift, name), + BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name), + BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name), + BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name), + BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name), + BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name), + BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name), } } From 9eabcc5b20cba7340801e0785115fa4106969a0c Mon Sep 17 00:00:00 2001 From: Hugo C <911307+hugocaillard@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:54:52 +0100 Subject: [PATCH 012/158] docs: improve principal-construct output type --- clarity/src/vm/docs/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index ca1ed78666..0b78120e19 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1767,7 +1767,7 @@ The `addrs` list contains the same PoX address values passed into the PoX smart }; const PRINCIPAL_CONSTRUCT_API: SpecialAPI = 
SpecialAPI { - input_type: "(buff 1) (buff 20) [(string-ascii 40)]", + input_type: "(buff 1), (buff 20), [(string-ascii 40)]", output_type: "(response principal { error_code: uint, principal: (option principal) })", snippet: "principal-construct? ${1:version} ${2:pub-key-hash}", signature: "(principal-construct? (buff 1) (buff 20) [(string-ascii 40)])", From 58a376fca1b51b1cbacb32b89c65fdfcc20a270f Mon Sep 17 00:00:00 2001 From: Diwaker Gupta <15990+diwakergupta@users.noreply.github.com> Date: Wed, 22 Feb 2023 14:44:25 -0500 Subject: [PATCH 013/158] chore: delete circle.yml CircleCI hasn't been used in 11+ months. Fixes #3072 --- circle.yml | 76 ------------------------------------------------------ 1 file changed, 76 deletions(-) delete mode 100644 circle.yml diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 131712a04a..0000000000 --- a/circle.yml +++ /dev/null @@ -1,76 +0,0 @@ -version: 2.1 -executors: - docker-publisher: - docker: - - image: circleci/buildpack-deps:stretch -jobs: - test_demo: - working_directory: /test - docker: - - image: rust:1.45-stretch - steps: - - checkout - - run: - command: | - cargo build - - run: - command: | - ./target/debug/stacks-inspect local initialize db && - ./target/debug/stacks-inspect local check sample-contracts/tokens.clar db && - ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.tokens sample-contracts/tokens.clar db && - ./target/debug/stacks-inspect local check sample-contracts/names.clar db && - ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.names sample-contracts/names.clar db && - ./target/debug/stacks-inspect local execute db S1G2081040G2081040G2081040G208105NK8PE5.tokens mint! 
SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR u100000 - - run: - command: | - echo "(get-balance 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)" | ./target/debug/stacks-inspect local eval S1G2081040G2081040G2081040G208105NK8PE5.tokens db - unit_tests_with_cov: - machine: true - working_directory: ~/blockstack - steps: - - checkout - - run: - name: Coverage via tarpaulin - command: | - docker run --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin \ - bash -c "cargo tarpaulin -v --workspace -t 300 -o Xml" - no_output_timeout: 200m - - run: - name: Upload to codecov.io - command: | - bash <(curl -s https://codecov.io/bash) - unit_tests: - docker: - - image: rust:1.40-stretch - working_directory: ~/blockstack - steps: - - checkout - - run: - no_output_timeout: 200m - command: | - cargo test -j 1 --workspace --exclude clarity - all_tests: - docker: - - image: rust:1.40-stretch - working_directory: ~/blockstack - steps: - - checkout - - run: - no_output_timeout: 200m - command: | - cargo test --workspace && cargo test -- --ignored --test-threads 1 -workflows: - version: 2 - test: - jobs: - - unit_tests - - test_demo -# disable `all_tests` for now, because the circle builder -# OOMs on compile... 
-# - all_tests: -# filters: -# branches: -# only: -# - master -# - /.*net.*/ -# - /.*marf.*/ From 5a4681758af36ae05aa7e33e745f51c089c34b76 Mon Sep 17 00:00:00 2001 From: Stjepan Golemac Date: Fri, 24 Feb 2023 09:22:04 +0100 Subject: [PATCH 014/158] fix: Exclude benchmarks from compilation --- Cargo.lock | 1203 ++++++++++------- Cargo.toml | 6 + ...block_limits.rs => block_limits.rs.backup} | 0 benches/{c32_bench.rs => c32_bench.rs.backup} | 0 ...ench.rs => large_contract_bench.rs.backup} | 0 .../{marf_bench.rs => marf_bench.rs.backup} | 0 6 files changed, 716 insertions(+), 493 deletions(-) rename benches/{block_limits.rs => block_limits.rs.backup} (100%) rename benches/{c32_bench.rs => c32_bench.rs.backup} (100%) rename benches/{large_contract_bench.rs => large_contract_bench.rs.backup} (100%) rename benches/{marf_bench.rs => marf_bench.rs.backup} (100%) diff --git a/Cargo.lock b/Cargo.lock index 4aa1ca1c4a..5f4e37fb49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ "gimli", ] @@ -29,7 +29,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -85,18 +85,27 @@ checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ 
"memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anyhow" -version = "1.0.53" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "assert-json-diff" @@ -121,9 +130,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -142,31 +151,30 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell", "slab", ] [[package]] name = "async-global-executor" -version = "2.0.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", "async-io", - "async-mutex", + "async-lock", "blocking", "futures-lite", - "num_cpus", "once_cell", ] @@ -188,46 +196,39 @@ dependencies = [ [[package]] name = "async-io" -version = "1.6.0" +version = "1.12.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ + "async-lock", + "autocfg", "concurrent-queue", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", "socket2", "waker-fn", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-mutex" -version = "1.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] name = "async-std" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-attributes", "async-channel", @@ -243,7 +244,6 @@ dependencies = [ "kv-log-macro", "log", "memchr", - "num_cpus", "once_cell", "pin-project-lite", "pin-utils", @@ -253,15 +253,15 @@ dependencies = [ [[package]] name = "async-task" -version = "4.1.0" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d306121baf53310a3fd342d88dc0824f6bbeace68347593658525565abee8" +checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "atty" @@ -269,22 +269,22 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi 0.3.9", ] [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", @@ -297,9 +297,9 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" @@ -309,9 +309,15 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" 
[[package]] name = "bitflags" @@ -337,16 +343,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -360,16 +366,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046e47d4b2d391b1f6f8b407b1deb8dee56c1852ccd868becf2710f601b5f427" +checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", ] [[package]] @@ -396,12 +402,12 @@ dependencies = [ "rstest", "rstest_reuse", "rusqlite", - "secp256k1 0.24.2", + "secp256k1 0.24.3", "serde", "serde_derive", "serde_json", "serde_stacker", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "siphasher", "slog", @@ -415,18 +421,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -439,9 +433,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-tools" @@ -457,30 +451,21 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cache-padded" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cast" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version 0.4.0", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -496,14 +481,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.45", + "wasm-bindgen", "winapi 0.3.9", ] @@ -513,7 +500,7 @@ version = 
"0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -552,20 +539,30 @@ dependencies = [ [[package]] name = "clear_on_drop" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9cc5db465b294c3fa986d5bbb0f3017cd850bff6dd6c52f9ccff8b4d21b7b08" +checksum = "38508a63f4979f0048febc9966fadbd48e5dab31fd0ec6a3f151bbf4a74f7423" dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] @@ -581,21 +578,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ "aes-gcm", - "base64 0.13.0", + "base64 0.13.1", "hkdf", "hmac", "percent-encoding", - "rand 0.8.4", + "rand 0.8.5", "sha2 0.9.9", "time 0.2.27", "version_check", ] +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" 
+checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -608,18 +611,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -643,9 +646,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -653,9 +656,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -663,9 +666,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if 1.0.0", 
"crossbeam-epoch", @@ -674,34 +677,33 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.7" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ + "autocfg", "cfg-if 1.0.0", "crossbeam-utils", - "lazy_static", - "memoffset", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.7" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if 1.0.0", - "lazy_static", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "typenum", ] @@ -711,19 +713,18 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] [[package]] name = "csv" -version = "1.1.6" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -739,9 +740,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.21" +version = "0.1.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn", @@ -770,6 +771,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "digest" version = "0.8.1" @@ -785,16 +830,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] name = "digest" -version = "0.10.3" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 
0.10.2", + "block-buffer 0.10.3", "crypto-common", ] @@ -840,24 +885,24 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "event-listener" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "extend" @@ -891,9 +936,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -906,11 +951,10 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -932,9 +976,9 @@ checksum = 
"3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -942,15 +986,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-lite" @@ -969,21 +1013,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = 
"9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-core", "futures-io", @@ -1006,9 +1050,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -1027,13 +1071,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -1048,15 +1092,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "gloo-timers" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d12a7f4e95cfe710f1d624fb1210b7d961a5fb05c4fd942f4feab06e61f590e" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -1066,9 +1110,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.11" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", 
@@ -1100,9 +1144,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashlink" @@ -1115,18 +1159,18 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84c647447a07ca16f5fbd05b633e535cc41a08d2d74ab1e08648df53be9cb89" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha-1", + "sha1 0.10.5", ] [[package]] @@ -1147,6 +1191,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + [[package]] name = "hkdf" version = "0.10.0" @@ -1169,20 +1222,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -1198,7 +1251,7 @@ dependencies = [ "anyhow", "async-channel", "async-std", - "base64 0.13.0", + 
"base64 0.13.1", "cookie", "futures-lite", "infer", @@ -1213,9 +1266,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1225,9 +1278,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" dependencies = [ "bytes", "futures-channel", @@ -1238,7 +1291,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", "socket2", "tokio", @@ -1249,9 +1302,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -1260,25 +1313,48 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi 0.3.9", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = 
[ + "cxx", + "cxx-build", +] + [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.8.0" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -1316,45 +1392,42 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.1" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.61" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -1383,15 +1456,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.117" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libflate" -version = "1.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d57e534717ac3e0b8dc459fe338bdfb4e29d7eea8fd0926ba649ddd3f4765f" +checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" dependencies = [ "adler32", "crc32fast", @@ -1400,9 +1473,9 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf" dependencies = [ "rle-decode-fast", ] @@ -1419,26 +1492,29 @@ dependencies = [ ] [[package]] -name = "log" -version = "0.4.14" +name = "link-cplusplus" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = 
"ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ - "cfg-if 1.0.0", - "value-bag", + "cc", ] [[package]] -name = "matches" -version = "0.1.9" +name = "log" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if 1.0.0", + "value-bag", +] [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1449,6 +1525,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -1457,9 +1542,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -1467,12 +1552,11 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", - "autocfg", ] [[package]] @@ -1488,7 +1572,7 @@ dependencies = [ 
"kernel32-sys", "libc", "log", - "miow 0.2.2", + "miow", "net2", "slab", "winapi 0.2.8", @@ -1496,15 +1580,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "miow 0.3.7", - "ntapi", - "winapi 0.3.9", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] @@ -1519,15 +1602,6 @@ dependencies = [ "ws2_32-sys", ] -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "multipart" version = "0.18.0" @@ -1540,7 +1614,7 @@ dependencies = [ "mime", "mime_guess", "quick-error", - "rand 0.8.4", + "rand 0.8.5", "safemem", "tempfile", "twoway", @@ -1548,9 +1622,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" +version = "0.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1559,31 +1633,22 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", "cfg-if 1.0.0", "libc", - "memoffset", -] - -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", + "memoffset 0.6.5", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -1591,37 +1656,46 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +dependencies = [ + "hermit-abi 0.2.6", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ - "hermit-abi", "libc", ] [[package]] name = "object" -version = "0.27.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.9.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = 
"b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oorandom" @@ -1649,16 +1723,17 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.1.3" +version = "2.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" dependencies = [ + "thiserror", "ucd-trie", ] @@ -1670,18 +1745,18 @@ checksum = "28b9b4df73455c861d7cbf8be42f01d3b373ed7f02e378d55fa84eafc6f638b1" [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1690,9 +1765,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1702,15 
+1777,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -1721,30 +1796,31 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.2.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" dependencies = [ + "autocfg", "cfg-if 1.0.0", "libc", "log", "wepoll-ffi", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] @@ -1760,9 +1836,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -1790,17 +1866,17 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.19" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1819,15 +1895,15 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psm" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] @@ -1840,9 +1916,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.15" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -1857,19 
+1933,18 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", + "rand_core 0.6.4", ] [[package]] @@ -1889,7 +1964,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1903,11 +1978,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.8", ] [[package]] @@ -1919,81 +1994,64 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "rayon" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.8", "redox_syscall", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "remove_dir_all" @@ -2006,11 +2064,11 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.9" +version = "0.11.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64 0.13.0", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -2022,18 +2080,19 @@ dependencies = [ "hyper-rustls", "ipnet", "js-sys", - "lazy_static", "log", "mime", + "once_cell", "percent-encoding", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.2", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2059,11 +2118,11 @@ dependencies = [ [[package]] name = "ripemd" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -2143,14 +2202,14 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.5", + "semver 1.0.16", ] [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -2164,20 +2223,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" [[package]] name = "safemem" @@ -2196,9 +2264,9 @@ dependencies = [ [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -2206,6 +2274,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + [[package]] name = "sct" version = "0.7.0" @@ -2218,9 +2292,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.21.2" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7883017d5b21f011ef8040ea9c6c7ac90834c0df26a69e4c0b06276151f125" +checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ "secp256k1-sys 0.4.2", "serde", @@ -2228,9 +2302,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.24.2" +version = 
"0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9512ffd81e3a3503ed401f79c33168b9148c75038956039166cd750eaa037c3" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ "secp256k1-sys 0.6.1", "serde", @@ -2274,9 +2348,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.5" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" [[package]] name = "semver-parser" @@ -2295,9 +2369,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.136" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] @@ -2314,9 +2388,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -2325,11 +2399,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.78" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2347,9 +2421,9 @@ dependencies = [ [[package]] name = "serde_stacker" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4c92391a63e3b83f77334d8beaaf11bac4c900f3769483e543bf76a81bf8ee2" +checksum = "35f73df5c3072392d6a2abb8588d06db7f57b83dc95d5bbb96da71cd8468fcfd" dependencies = [ "serde", "stacker", @@ -2362,22 +2436,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.6", ] [[package]] @@ -2389,6 +2461,17 @@ dependencies = [ "sha1_smol", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sha1_smol" version = "1.0.0" @@ -2422,13 +2505,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.3", + "digest 0.10.6", "sha2-asm 0.6.2", ] @@ -2452,11 +2535,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = 
"bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", "keccak", ] @@ -2471,15 +2554,18 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a86232ab60fa71287d7f2ddae4a7073f6b7aac33631c3015abb556f08c6d0a3e" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "slab" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "slog" @@ -2489,40 +2575,40 @@ checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" [[package]] name = "slog-json" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f7f7a952ce80fca9da17bf0a53895d11f8aa1ba063668ca53fc72e7869329e9" +checksum = "3e1e53f61af1e3c8b852eef0a9dee29008f55d6dd63794f3f12cef786cf0f219" dependencies = [ - "chrono", "serde", "serde_json", "slog", + "time 0.3.19", ] [[package]] name = "slog-term" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95c1e7e5aab61ced6006149ea772770b84a0d16ce0f7885def313e4829946d76" +checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" dependencies = [ "atty", - "chrono", "slog", "term", "thread_local", + "time 0.3.19", ] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = 
"0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi 0.3.9", @@ -2536,9 +2622,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stacker" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90939d5171a4420b3ff5fbc8954d641e7377335454c259dcb80786f3f21dc9b4" +checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" dependencies = [ "cc", "cfg-if 1.0.0", @@ -2563,12 +2649,12 @@ dependencies = [ "rstest", "rstest_reuse", "rusqlite", - "secp256k1 0.21.2", + "secp256k1 0.21.3", "serde", "serde_derive", "serde_json", "serde_stacker", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "slog", "slog-json", @@ -2661,7 +2747,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "sha1", + "sha1 0.6.1", "syn", ] @@ -2676,7 +2762,7 @@ name = "stx-genesis" version = "0.1.0" dependencies = [ "libflate", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] @@ -2687,13 +2773,13 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "d56e159d99e6c2b93995d171050271edb50ecc5288fbc7cc17de8fdce4e58c14" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2721,6 +2807,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "termcolor" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +dependencies = [ + "winapi-util", +] + 
[[package]] name = "textwrap" version = "0.11.0" @@ -2732,18 +2827,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -2752,18 +2847,19 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -2780,11 +2876,31 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", + "time-macros 0.1.1", "version_check", "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53250a3b3fed8ff8fd988587d8925d26a83ac3845d9e03b220b37f34c2b8d6c2" +dependencies = [ + "itoa", + "libc", + "num_threads", + "serde", + "time-core", + "time-macros 0.2.7", +] + +[[package]] +name = "time-core" +version = "0.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" version = "0.1.1" @@ -2795,6 +2911,15 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a460aeb8de6dcb0f381e1ee05f1cd56fcf5a5f6eb8187ff3d8f0b11078d38b7c" +dependencies = [ + "time-core", +] + [[package]] name = "time-macros-impl" version = "0.1.2" @@ -2820,32 +2945,34 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.16.1" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ + "autocfg", "bytes", "libc", "memchr", - "mio 0.7.14", + "mio 0.8.6", "num_cpus", "pin-project-lite", - "winapi 0.3.9", + "socket2", + "windows-sys 0.42.0", ] [[package]] @@ -2861,9 +2988,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = 
"8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -2872,51 +2999,50 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.15.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "pin-project", "tokio", "tungstenite", ] [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.30" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d8d93354fe2a8e50d5953f5ae2e47a3fc2ef03292e7ea46e3cc38f549525fb9" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if 1.0.0", "log", @@ -2926,32 +3052,32 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.22" +version = 
"0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.14.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", - "rand 0.8.4", + "rand 0.8.5", "sha-1", "thiserror", "url", @@ -2969,15 +3095,15 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "unicase" @@ -2990,30 +3116,30 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = 
"d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" + +[[package]] +name = "unicode-ident" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.2" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "universal-hash" @@ -3021,7 +3147,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] @@ -3033,13 +3159,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", "serde", ] @@ -3052,9 +3177,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = 
"value-bag" -version = "1.0.0-alpha.8" +version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" dependencies = [ "ctor", "version_check", @@ -3101,9 +3226,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" +checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" dependencies = [ "bytes", "futures-channel", @@ -3117,6 +3242,7 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", + "rustls-pemfile 0.2.1", "scoped-tls", "serde", "serde_json", @@ -3141,11 +3267,17 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3153,13 +3285,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ 
"bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -3168,9 +3300,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3180,9 +3312,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3190,9 +3322,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -3203,15 +3335,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -3229,9 +3361,9 @@ dependencies = [ [[package]] name = 
"webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] @@ -3288,11 +3420,92 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" + +[[package]] +name = 
"windows_i686_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" + [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi 0.3.9", ] @@ -3309,6 +3522,10 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" +<<<<<<< Updated upstream checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" +======= +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +>>>>>>> Stashed changes diff --git a/Cargo.toml b/Cargo.toml index 35e52d1e1f..f1a16eb783 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ path = "src/clarity_cli_main.rs" name = "blockstack-cli" path = "src/blockstack_cli.rs" +<<<<<<< Updated upstream [[bench]] name = 
"marf_bench" harness = false @@ -48,6 +49,11 @@ harness = false [[bench]] name = "c32_bench" harness = false +======= +[[bin]] +name = "relay-server" +path = "contrib/tools/relay-server/src/main.rs" +>>>>>>> Stashed changes [dependencies] rand = "0.7.3" diff --git a/benches/block_limits.rs b/benches/block_limits.rs.backup similarity index 100% rename from benches/block_limits.rs rename to benches/block_limits.rs.backup diff --git a/benches/c32_bench.rs b/benches/c32_bench.rs.backup similarity index 100% rename from benches/c32_bench.rs rename to benches/c32_bench.rs.backup diff --git a/benches/large_contract_bench.rs b/benches/large_contract_bench.rs.backup similarity index 100% rename from benches/large_contract_bench.rs rename to benches/large_contract_bench.rs.backup diff --git a/benches/marf_bench.rs b/benches/marf_bench.rs.backup similarity index 100% rename from benches/marf_bench.rs rename to benches/marf_bench.rs.backup From 9800e7e8a66463d059577966864dd019aefde787 Mon Sep 17 00:00:00 2001 From: Stjepan Golemac Date: Fri, 24 Feb 2023 09:28:39 +0100 Subject: [PATCH 015/158] fix: Resolve conflicts and the remaining two errors --- Cargo.lock | 4 ---- Cargo.toml | 22 ---------------------- testnet/stacks-node/src/tests/epoch_21.rs | 4 +--- 3 files changed, 1 insertion(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f4e37fb49..b20f432762 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3524,8 +3524,4 @@ dependencies = [ name = "zeroize" version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -<<<<<<< Updated upstream -checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" -======= checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" ->>>>>>> Stashed changes diff --git a/Cargo.toml b/Cargo.toml index f1a16eb783..4a68b9056e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,28 +33,6 @@ path = "src/clarity_cli_main.rs" name = "blockstack-cli" path = "src/blockstack_cli.rs" -<<<<<<< 
Updated upstream -[[bench]] -name = "marf_bench" -harness = false - -[[bench]] -name = "large_contract_bench" -harness = false - -[[bench]] -name = "block_limits" -harness = false - -[[bench]] -name = "c32_bench" -harness = false -======= -[[bin]] -name = "relay-server" -path = "contrib/tools/relay-server/src/main.rs" ->>>>>>> Stashed changes - [dependencies] rand = "0.7.3" rand_chacha = "=0.2.2" diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 3b0675bcb0..d6e4732955 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -4711,7 +4711,7 @@ fn trait_invocation_cross_epoch() { test_observer::spawn(); - let (mut conf, miner_account) = neon_integration_test_conf(); + let (mut conf, _) = neon_integration_test_conf(); let mut initial_balances = vec![InitialBalance { address: spender_addr.clone(), amount: 200_000_000, @@ -4728,8 +4728,6 @@ fn trait_invocation_cross_epoch() { epochs[3].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 2000; From 24d064f05526baa5e1ab253deac5e9ebe2066a4c Mon Sep 17 00:00:00 2001 From: Stjepan Golemac Date: Mon, 27 Feb 2023 13:55:14 +0100 Subject: [PATCH 016/158] clean: remove benchmark files --- benches/block_limits.rs.backup | 678 ------------------------- benches/c32_bench.rs.backup | 35 -- benches/large_contract_bench.rs.backup | 158 ------ benches/marf_bench.rs.backup | 136 ----- 4 files changed, 1007 deletions(-) delete mode 100644 benches/block_limits.rs.backup delete mode 100644 benches/c32_bench.rs.backup delete mode 100644 benches/large_contract_bench.rs.backup delete mode 100644 benches/marf_bench.rs.backup diff --git a/benches/block_limits.rs.backup b/benches/block_limits.rs.backup deleted file mode 100644 index 6ab56baf52..0000000000 --- 
a/benches/block_limits.rs.backup +++ /dev/null @@ -1,678 +0,0 @@ -extern crate blockstack_lib; -extern crate rand; -extern crate serde_json; - -use std::fs; -use std::path::PathBuf; -use std::process; -use std::{env, time::Instant}; - -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use blockstack_lib::clarity_vm::database::MemoryBackingStore; -use blockstack_lib::core::StacksEpochId; -use blockstack_lib::types::chainstate::BlockHeaderHash; -use blockstack_lib::types::chainstate::BurnchainHeaderHash; -use blockstack_lib::types::chainstate::VRFSeed; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use blockstack_lib::vm::ast::build_ast; -use blockstack_lib::vm::contexts::GlobalContext; -use blockstack_lib::vm::costs::LimitedCostTracker; -use blockstack_lib::vm::errors::InterpreterResult; -use blockstack_lib::vm::{eval_all, ContractContext}; -use rand::Rng; - -use blockstack_lib::clarity_vm::database::marf::MarfedKV; -use blockstack_lib::types::chainstate::{StacksAddress, StacksBlockId}; -use blockstack_lib::util::boot::boot_code_id; -use blockstack_lib::{ - vm::costs::ExecutionCost, - vm::{ - database::{HeadersDB, NULL_BURN_STATE_DB}, - types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}, - Value, - }, -}; - -struct TestHeadersDB; - -impl HeadersDB for TestHeadersDB { - fn get_stacks_block_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - Some(BlockHeaderHash(id_bhh.0.clone())) - } - - fn get_burn_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - Some(BurnchainHeaderHash(id_bhh.0.clone())) - } - - fn get_vrf_seed_for_block(&self, _id_bhh: &StacksBlockId) -> Option { - Some(VRFSeed([0; 32])) - } - - fn get_burn_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { - Some(1) - } - - fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { - if id_bhh == &StacksBlockId::sentinel() { - Some(0) - } else { - let mut bytes = [0; 4]; - 
bytes.copy_from_slice(&id_bhh.0[0..4]); - let height = u32::from_le_bytes(bytes); - Some(height) - } - } - - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { - None - } -} - -fn as_hash160(inp: u32) -> [u8; 20] { - let mut out = [0; 20]; - out[0..4].copy_from_slice(&inp.to_le_bytes()); - out -} - -fn as_hash(inp: u32) -> [u8; 32] { - let mut out = [0; 32]; - out[0..4].copy_from_slice(&inp.to_le_bytes()); - out -} - -fn transfer_test(buildup_count: u32, scaling: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let principals: Vec = (0..(buildup_count - 1)) - .into_iter() - .map(|i| StandardPrincipalData(0, as_hash160(i)).into()) - .collect(); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&principals[ix - 1]); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - Ok(()) - }) - .unwrap() - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // transfer phase - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - let mut rng = rand::thread_rng(); - for _i in 0..scaling { - let from = rng.gen_range(0, principals.len()); - let to = (from + rng.gen_range(1, principals.len())) % principals.len(); 
- - conn.as_transaction(|tx| { - tx.run_stx_transfer(&principals[from], &principals[to], 10, &BuffData::empty()) - .unwrap() - }); - } - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "{} transfers in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn setup_chain_state(scaling: u32) -> MarfedKV { - let pre_initialized_path = format!("/tmp/block_limit_bench_{}.marf", scaling); - let out_path = "/tmp/block_limit_bench_last.marf"; - - if fs::metadata(&pre_initialized_path).is_err() { - let marf = MarfedKV::open(&pre_initialized_path, None).unwrap(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let mut conn = clarity_instance.begin_test_genesis_block( - &StacksBlockId::sentinel(), - &StacksBlockId(as_hash(0)), - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - conn.as_transaction(|tx| { - for j in 0..scaling { - tx.with_clarity_db(|db| { - let addr = StandardPrincipalData(0, as_hash160(j + 1)).into(); - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&addr); - stx_account_0.credit(1); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1).unwrap(); - Ok(()) - }) - .unwrap(); - } - }); - - conn.commit_to_block(&StacksBlockId(as_hash(0))); - }; - - if fs::metadata(&out_path).is_err() { - let path = PathBuf::from(out_path); - fs::create_dir_all(&path).expect("Error creating directory"); - } - - fs::copy( - &format!("{}/marf.sqlite", pre_initialized_path), - &format!("{}/marf.sqlite", out_path), - ) - .unwrap(); - return MarfedKV::open(out_path, None).unwrap(); -} - -fn test_via_raw_contract( - eval: &str, - scaling: u32, - buildup_count: u32, - genesis_size: u32, -) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = 
(0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let mut smart_contract = "".to_string(); - for _i in 0..scaling { - smart_contract.push_str(&format!("{}\n", eval)); - } - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - let exec_cost = conn.as_transaction(|tx| { - let analysis_cost = tx.cost_so_far(); - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, &smart_contract) - .unwrap(); - tx.initialize_smart_contract( - &contract_id, - &contract_ast, - &smart_contract, - None, - |_, _| false, - ) - .unwrap(); - - let mut initialize_cost = tx.cost_so_far(); - initialize_cost.sub(&analysis_cost).unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - .expect("FATAL: failed to store contract analysis"); - - initialize_cost - }); - - let _this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed 
= begin.elapsed(); - - println!( - "Completed raw execution scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - exec_cost -} - -fn smart_contract_test(scaling: u32, buildup_count: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let mut smart_contract = "".to_string(); - for i in 0..scaling { - smart_contract.push_str(&format!("(define-public (foo-{}) (ok (+ u2 u3)))\n", i)); - } - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, 
&smart_contract) - .unwrap(); - tx.initialize_smart_contract( - &contract_id, - &contract_ast, - &smart_contract, - None, - |_, _| false, - ) - .unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - .expect("FATAL: failed to store contract analysis"); - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed smart-contract scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn expensive_contract_test(scaling: u32, buildup_count: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let smart_contract = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..scaling) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - contract_id.clone(), - contract_id.clone() - )) - .collect::>() - .join(" ") - ); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, &smart_contract) - .unwrap(); - tx.initialize_smart_contract(&contract_id, &contract_ast, &smart_contract, |_, _| false) - .unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - .expect("FATAL: failed to store contract analysis"); - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed smart-contract scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -pub fn execute_in_epoch(program: &str, epoch: StacksEpochId) -> InterpreterResult> { - let contract_id = QualifiedContractIdentifier::transient(); - let mut contract_context = 
ContractContext::new(contract_id.clone()); - let mut marf = MemoryBackingStore::new(); - let conn = marf.as_clarity_db(); - let mut global_context = GlobalContext::new(false, conn, LimitedCostTracker::new_free(), epoch); - global_context.execute(|g| { - let parsed = build_ast(&contract_id, program, &mut ())?.expressions; - eval_all(&parsed, &mut contract_context, g) - }) -} - -fn execute(program: &str) -> InterpreterResult> { - let epoch_200_result = execute_in_epoch(program, StacksEpochId::Epoch20); - let epoch_205_result = execute_in_epoch(program, StacksEpochId::Epoch2_05); - assert_eq!( - epoch_200_result, epoch_205_result, - "Epoch 2.0 and 2.05 should have same execution result, but did not for program `{}`", - program - ); - epoch_205_result -} - -fn stack_stx_test(buildup_count: u32, genesis_size: u32, scaling: u32) -> ExecutionCost { - let start = Instant::now(); - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stackers: Vec = (0..scaling) - .into_iter() - .map(|i| StandardPrincipalData(0, as_hash160(i)).into()) - .collect(); - - let stacker_balance = (buildup_count as u128 - 1) * 1_000_000; - - let pox_addrs: Vec = (0..50u64) - .map(|ix| { - execute(&format!( - "{{ version: 0x00, hashbytes: 0x000000000000000000000000{} }}", - &blockstack_lib::util::hash::to_hex(&ix.to_le_bytes()) - )) - .unwrap() - .unwrap() - }) - .collect(); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - for stacker in stackers.iter() { - let mut 
stx_account_0 = db.get_stx_balance_snapshot_genesis(stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - } - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // do the stack-stx block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - for stacker in stackers.iter() { - let result = tx - .run_contract_call( - stacker, - None, - &boot_code_id("pox", false), - "stack-stx", - &[ - Value::UInt(stacker_balance), - pox_addrs[0].clone(), - Value::UInt(buildup_count as u128 + 2), - Value::UInt(12), - ], - |_, _| false, - ) - .unwrap() - .0; - if let Err(v) = result.expect_result() { - panic!("Stacking failed: {}", v); - } - } - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed {} stack-stx ops in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn main() { - let argv: Vec<_> = env::args().collect(); - - if argv.len() < 3 { - eprintln!( - "Usage: {} [test-name] [scalar-0] ... [scalar-n] - -transfer -smart-contract -stack-stx -clarity-transfer -clarity-verify -clarity-raw -", - argv[0] - ); - process::exit(1); - } - - let block_build_up = argv[2].parse().expect("Invalid scalar"); - let genesis_size = argv[3].parse().expect("Invalid scalar"); - let scaling = argv[4].parse().expect("Invalid scalar"); - - let result = match argv[1].as_str() { - "transfer" => transfer_test(block_build_up, scaling, genesis_size), - "smart-contract" => smart_contract_test(scaling, block_build_up, genesis_size), - "clarity-transfer" => test_via_raw_contract(r#"(stx-transfer? 
u1 tx-sender 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)"#, - scaling, block_build_up, genesis_size), - "expensive-contract" => expensive_contract_test(scaling, block_build_up, genesis_size), - "clarity-verify" => test_via_raw_contract("(secp256k1-verify 0xde5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f04 - 0x8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a1301 - 0x03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7786110)", - scaling, block_build_up, genesis_size), - "stack-stx" => stack_stx_test(block_build_up, genesis_size, scaling), - _ => { - eprintln!("bad test name"); - process::exit(1); - } - }; - - println!("{}", serde_json::to_string(&result).unwrap()); -} diff --git a/benches/c32_bench.rs.backup b/benches/c32_bench.rs.backup deleted file mode 100644 index 3fc4a93381..0000000000 --- a/benches/c32_bench.rs.backup +++ /dev/null @@ -1,35 +0,0 @@ -extern crate blockstack_lib; -extern crate criterion; -extern crate rand; - -use blockstack_lib::address::c32::{c32_address, c32_address_decode}; -use blockstack_lib::address::c32_old::c32_address_decode as c32_address_decode_old; -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; -use rand::Rng; - -fn bench_c32_decoding(c: &mut Criterion) { - let mut group = c.benchmark_group("C32 Decoding"); - - let mut addrs: Vec = vec![]; - for _ in 0..5 { - // random version - let random_version: u8 = rand::thread_rng().gen_range(0, 31); - // random 20 bytes - let random_bytes = rand::thread_rng().gen::<[u8; 20]>(); - let addr = c32_address(random_version, &random_bytes).unwrap(); - addrs.push(addr); - } - - for addr in addrs.iter() { - group.bench_with_input(BenchmarkId::new("Legacy", addr), addr, |b, i| { - b.iter(|| c32_address_decode_old(i)) - }); - group.bench_with_input(BenchmarkId::new("Updated", addr), addr, |b, i| { - b.iter(|| c32_address_decode(i)) - }); - } - group.finish(); -} - 
-criterion_group!(benches, bench_c32_decoding); -criterion_main!(benches); diff --git a/benches/large_contract_bench.rs.backup b/benches/large_contract_bench.rs.backup deleted file mode 100644 index 0e57793f2e..0000000000 --- a/benches/large_contract_bench.rs.backup +++ /dev/null @@ -1,158 +0,0 @@ -#[macro_use] -extern crate criterion; -extern crate blockstack_lib; -extern crate rand; - -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use blockstack_lib::clarity_vm::database::marf::MarfedKV; -use blockstack_lib::types::chainstate::StacksBlockId; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use blockstack_lib::vm::database::NULL_BURN_STATE_DB; -use blockstack_lib::{vm::database::NULL_HEADER_DB, vm::types::QualifiedContractIdentifier}; -use criterion::Criterion; - -pub fn rollback_log_memory_test() { - let marf = MarfedKV::temporary(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let EXPLODE_N = 100; - - let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); - - { - let mut conn = clarity_instance.begin_block( - &StacksBlockId::sentinel(), - &StacksBlockId::from_bytes(&[0 as u8; 32]).unwrap(), - &NULL_HEADER_DB, - &NULL_BURN_STATE_DB, - ); - - let define_data_var = "(define-data-var XZ (buff 1048576) \"a\")"; - - let mut contract = define_data_var.to_string(); - for i in 0..20 { - let cur_size = format!("{}", 2u32.pow(i)); - contract.push_str("\n"); - contract.push_str(&format!( - "(var-set XZ (concat (unwrap-panic (as-max-len? (var-get XZ) u{})) - (unwrap-panic (as-max-len? 
(var-get XZ) u{}))))", - cur_size, cur_size - )); - } - for i in 0..EXPLODE_N { - let exploder = format!("(define-data-var var-{} (buff 1048576) (var-get XZ))", i); - contract.push_str("\n"); - contract.push_str(&exploder); - } - - conn.as_transaction(|conn| { - let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - - assert!(format!( - "{:?}", - conn.initialize_smart_contract(&contract_identifier, &ct_ast, &contract, |_, _| { - false - }) - .unwrap_err() - ) - .contains("MemoryBalanceExceeded")); - }); - } -} - -pub fn ccall_memory_test() { - let marf = MarfedKV::temporary(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let COUNT_PER_CONTRACT = 20; - let CONTRACTS = 5; - - { - let mut conn = clarity_instance.begin_block( - &StacksBlockId::sentinel(), - &StacksBlockId::from_bytes(&[0 as u8; 32]).unwrap(), - &NULL_HEADER_DB, - &NULL_BURN_STATE_DB, - ); - - let define_data_var = "(define-constant buff-0 \"a\")\n"; - - let mut contract = define_data_var.to_string(); - for i in 0..20 { - contract.push_str(&format!( - "(define-constant buff-{} (concat buff-{} buff-{}))\n", - i + 1, - i, - i - )); - } - - for i in 0..COUNT_PER_CONTRACT { - contract.push_str(&format!("(define-constant var-{} buff-20)\n", i)); - } - - contract.push_str("(define-public (call)\n"); - - let mut contracts = vec![]; - - for i in 0..CONTRACTS { - let mut my_contract = contract.clone(); - if i == 0 { - my_contract.push_str("(ok 1))\n"); - } else { - my_contract.push_str(&format!("(contract-call? 
.contract-{} call))\n", i - 1)); - } - my_contract.push_str("(call)\n"); - contracts.push(my_contract); - } - - for (i, contract) in contracts.into_iter().enumerate() { - let contract_name = format!("contract-{}", i); - let contract_identifier = QualifiedContractIdentifier::local(&contract_name).unwrap(); - - if i < (CONTRACTS - 1) { - conn.as_transaction(|conn| { - let (ct_ast, ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - conn.initialize_smart_contract( - &contract_identifier, - &ct_ast, - &contract, - |_, _| false, - ) - .unwrap(); - conn.save_analysis(&contract_identifier, &ct_analysis) - .unwrap(); - }) - } else { - conn.as_transaction(|conn| { - let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - assert!(format!( - "{:?}", - conn.initialize_smart_contract( - &contract_identifier, - &ct_ast, - &contract, - |_, _| false - ) - .unwrap_err() - ) - .contains("MemoryBalanceExceeded")); - }) - } - } - } -} - -pub fn basic_usage_benchmark(c: &mut Criterion) { - c.bench_function("rollback_log_memory_test", |b| { - b.iter(|| rollback_log_memory_test()) - }); - c.bench_function("ccall_memory_test", |b| b.iter(|| ccall_memory_test())); -} - -criterion_group!(benches, basic_usage_benchmark); -criterion_main!(benches); diff --git a/benches/marf_bench.rs.backup b/benches/marf_bench.rs.backup deleted file mode 100644 index 169796ac33..0000000000 --- a/benches/marf_bench.rs.backup +++ /dev/null @@ -1,136 +0,0 @@ -#[macro_use] -extern crate criterion; -extern crate blockstack_lib; -extern crate rand; - -use blockstack_lib::chainstate::stacks::Error; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use criterion::Criterion; -use rand::prelude::*; -use std::fs; - -use blockstack_lib::chainstate::stacks::index::{marf::MARF, storage::TrieFileStorage}; -use blockstack_lib::types::chainstate::{MARFValue, StacksBlockId}; - -pub fn begin( - marf: &mut MARF, - chain_tip: 
&StacksBlockId, - next_chain_tip: &StacksBlockId, -) -> Result<(), Error> { - let mut tx = marf.begin_tx()?; - tx.begin(chain_tip, next_chain_tip)?; - Ok(()) -} - -fn benchmark_marf_usage( - filename: &str, - blocks: u32, - writes_per_block: u32, - reads_per_block: u32, - batch: bool, -) { - if fs::metadata(filename).is_ok() { - fs::remove_file(filename).unwrap(); - }; - let f = TrieFileStorage::open(filename).unwrap(); - let mut block_header = StacksBlockId::from_bytes(&[0u8; 32]).unwrap(); - let mut marf = MARF::from_storage(f); - - begin(&mut marf, &StacksBlockId::sentinel(), &block_header).unwrap(); - - let mut rng = rand::thread_rng(); - - let mut values = vec![]; - - for i in 0..blocks { - if batch { - let mut batch_keys = Vec::new(); - let mut batch_vals = Vec::new(); - for k in 0..writes_per_block { - let key = format!("{}::{}", i, k); - let mut value = [0u8; 40]; - rng.fill_bytes(&mut value); - batch_keys.push(key.clone()); - batch_vals.push(MARFValue(value.clone())); - values.push((key, MARFValue(value))); - } - marf.insert_batch(&batch_keys, batch_vals).unwrap(); - } else { - for k in 0..writes_per_block { - let key = format!("{}::{}", i, k); - let mut value = [0u8; 40]; - rng.fill_bytes(&mut value); - marf.insert(&key, MARFValue(value.clone())).unwrap(); - values.push((key, MARFValue(value))); - } - } - - for _k in 0..reads_per_block { - let (key, value) = values.as_slice().choose(&mut rng).unwrap(); - assert_eq!( - marf.get_with_proof(&block_header, key).unwrap().unwrap().0, - *value - ); - } - - let mut next_block_header = (i + 1).to_le_bytes().to_vec(); - next_block_header.resize(32, 0); - let next_block_header = StacksBlockId::from_bytes(next_block_header.as_slice()).unwrap(); - - marf.commit().unwrap(); - begin(&mut marf, &block_header, &next_block_header).unwrap(); - block_header = next_block_header; - } - marf.commit().unwrap(); -} - -fn benchmark_marf_read(filename: &str, reads: u32, block: u32, writes_per_block: u32) { - let f = 
TrieFileStorage::open(filename).unwrap(); - let mut block_header = block.to_le_bytes().to_vec(); - block_header.resize(32, 0); - let block_header = StacksBlockId::from_bytes(block_header.as_slice()).unwrap(); - - let mut marf = MARF::from_storage(f); - - let mut rng = rand::thread_rng(); - - for _i in 0..reads { - let i: u32 = rng.gen_range(0, block); - let k: u32 = rng.gen_range(0, writes_per_block); - let key = format!("{}::{}", i, k); - marf.get_with_proof(&block_header, &key).unwrap().unwrap().0; - } -} - -pub fn basic_usage_benchmark(c: &mut Criterion) { - c.bench_function("marf_setup_1000b_5kW", |b| { - b.iter(|| benchmark_marf_usage("/tmp/db.1k.sqlite", 1000, 5000, 0, false)) - }); - c.bench_function("marf_setup_400b_5kW", |b| { - b.iter(|| benchmark_marf_usage("/tmp/db.400.sqlite", 1000, 5000, 0, false)) - }); - c.bench_function("marf_read_1000b_1kW", |b| { - b.iter(|| benchmark_marf_read("/tmp/db.1k.sqlite", 1000, 1000, 5000)) - }); - c.bench_function("marf_read_400b_1kW", |b| { - b.iter(|| benchmark_marf_read("/tmp/db.400.sqlite", 1000, 400, 5000)) - }); - - c.bench_function("marf_usage_1b_10kW_0kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 1, 10000, 0, false)) - }); - c.bench_function("marf_usage_10b_1kW_2kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 10, 1000, 2000, false)) - }); - c.bench_function("marf_usage_100b_5kW_20kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 20, 5000, 20000, false)) - }); - c.bench_function("marf_usage_batches_10b_1kW_2kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 10, 1000, 2000, true)) - }); -} - -pub fn scaling_read_ratio(_c: &mut Criterion) {} - -criterion_group!(benches, basic_usage_benchmark); -criterion_main!(benches); From bb43fc1fc719dc4dc7b9d28581341b06b50f826d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 27 Feb 2023 17:29:02 -0500 Subject: [PATCH 017/158] fix: don't assume that the bitcoin node always gives a non-zero 
number of headers --- src/burnchains/bitcoin/spv.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 4518dd3930..713a148788 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -831,6 +831,11 @@ impl SpvClient { assert!(self.readwrite, "SPV header DB is open read-only"); let num_headers = block_headers.len(); + if num_headers == 0 { + // nothing to do + return Ok(()); + } + let first_header_hash = block_headers[0].header.bitcoin_hash(); let last_header_hash = block_headers[block_headers.len() - 1].header.bitcoin_hash(); let total_work_before = self.update_chain_work()?; From ebce622b843442c5406c05f1198e2ab71280d1b5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 1 Mar 2023 12:22:34 -0500 Subject: [PATCH 018/158] fix: add unit test and change log entry --- CHANGELOG.md | 6 ++++++ src/burnchains/bitcoin/spv.rs | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2864df84a..3225ac33ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + +### Fixed + +- Handle the case where a bitcoin node returns zero headers (#3588) + ## [2.1] This is a **consensus-breaking** release that introduces a _lot_ of new diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 713a148788..28746fcdaf 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -1832,4 +1832,24 @@ mod test { let deserialized: Vec> = deserialize(&encoded_tx).unwrap(); } + + #[test] + fn test_handle_headers_empty() { + let headers_path = "/tmp/test-spv-handle_headers_empty.dat"; + if fs::metadata(headers_path).is_ok() { + fs::remove_file(headers_path).unwrap(); + } + + let mut spv_client = SpvClient::new( + headers_path, + 0, + None, + BitcoinNetworkType::Regtest, + true, + false, + ) + .unwrap(); + + spv_client.handle_headers(1, vec![]).unwrap(); + } } From 804303892ca12542e7123fb3139589b97c8f569b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 1 Mar 2023 12:58:34 -0500 Subject: [PATCH 019/158] fix: use explicit version number --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3225ac33ab..7c53ca6371 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
-## [Unreleased] +## [2.1.0.0.1] ### Fixed From 58cc729d75e11aa1ee390afa17dcdf10995f556a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 3 Mar 2023 21:13:04 -0500 Subject: [PATCH 020/158] fix: always_use_affirmation_maps defaults to false --- testnet/stacks-node/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index fbd35c81ed..a3dbfcd91f 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1729,7 +1729,7 @@ impl NodeConfig { marf_defer_hashing: true, pox_sync_sample_secs: 30, use_test_genesis_chainstate: None, - always_use_affirmation_maps: true, + always_use_affirmation_maps: false, require_affirmed_anchor_blocks: true, fault_injection_hide_blocks: false, } From 9f7a650e1823f0f0faa0ca8cd29cc95244c1b495 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 3 Mar 2023 21:17:11 -0500 Subject: [PATCH 021/158] fix: update changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c53ca6371..cafbdd3669 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Fixed - Handle the case where a bitcoin node returns zero headers (#3588) +- The default value for `always_use_affirmation_maps` is now set to `false`, + instead of `true`. This was preventing testnet nodes from reaching the chain +tip with the default configuration. 
## [2.1] From 2261900c1e4c1be232af5e2df94398e47d91c734 Mon Sep 17 00:00:00 2001 From: Igor Sylvester <1248500+igorsyl@users.noreply.github.com> Date: Tue, 7 Mar 2023 18:02:42 -0600 Subject: [PATCH 022/158] Import wiki https://github.com/stacks-network/stacks-blockchain/wiki/How-to-Create-and-Review-PRs --- DEVINFO.md | 174 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 DEVINFO.md diff --git a/DEVINFO.md b/DEVINFO.md new file mode 100644 index 0000000000..3ba00be410 --- /dev/null +++ b/DEVINFO.md @@ -0,0 +1,174 @@ +# How to Create and Review PRs + +## Overview + +Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. There is essentially zero room for consensus bugs. If you ship a consensus bug, that bug could not only have catastrophic consequences for users (i.e. they lose all their money), but also be intractable to fix, mitigate, or remove. This is because unlike nearly every other kind of networked software, **the state of the blockchain is what the users' computers say it is.** If you want to make changes, you _must_ get _user_ buy-in, and this is necessarily time-consuming and not at all guaranteed to succeed. + +Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). + +A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. 
Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. + +This document describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. + +This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. + +## Reviewer Expectations + +The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the submitter will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. + +Reviewers should **strive to complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. + +Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. 
This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. + +Code reviews should be timely. A PR review should begin no more than **2 business days** after the PR is submitted. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the submitter. If a review cannot be begun within 2 business days, then the reviewers should **tell the submitter when they can begin**. This gives the submitter the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. + +Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. + +## Submitter Expectations + +Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it in no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. + +The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. 
+ +## Submission Checklist + +A PR submission's text should **answer the following questions** for reviewers: + +* What problem is being solved by this PR? +* What does the solution do to address them? +* Why is this the best solution? What alternatives were considered, and why are they worse? +* What do reviewers need to be familiar with in order to provide useful feedback? +* What issue(s) are addressed by this PR? +* What are some hints to understanding some of the more intricate or clever parts of the PR? + +In addition, the PR submission should **answer the prompts of the Github template** we use for PRs. + +The code itself should adhere to the following guidelines, which both submitters and reviewers should check: + +### Documentation + +* Each file must have a **copyright statement**. +* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole. +* Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. + +Within the source files, the following **code documentation** standards are expected: + +* Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. +* Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. +* Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. + +### Factoring + +* **Public or exported struct, enum, and trait definitions go into the `mod.rs` file**. Private structs, enums, and traits can go anywhere. 
+ +* **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. + +* Directories represent collections of related but distinct subsystems. + +* To the greatest extent possible, **business logic and I/O should be separated**. A common pattern used in the codebase is to place the business logic into an "inner" function that does not do I/O, and handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the "inner" function. The "inner" function is often private, whereas the "outer" function is often public. + +### Refactoring + +* **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. + +* Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before being carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. + +### Databases + +* If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. 
+ +* Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. + +* The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). + +* Database changes **cannot be consensus-critical** unless part of a hard fork (see below). + +* If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. + +### Data Input + +* **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. + +* **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. + +* **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. + +* **All input deserialization is resource-bound.** Every piece of code that ingests data must impose a maximum amount of RAM and CPU required to decode it into a structured representation. If the data does not decode with the allotted resources, then no further processing may be done and the data is discarded. 
+ +* **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is met by the space-bound requirement. + +* **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). + +### Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity + +Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. + +Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. + +### Changes to the Peer Network + +Any changes to the peer networking code **must be run in production before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. + +Changes to the peer network should be deployed incrementally and tested by multiple ecosystem entities when possible to verify that they function properly in a production setting. + +### Performance Improvements + +Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. 
+ +### Error Handling + +* **Each subsystem must have its own `Error` type.** Error types of aggregate subsystems are encouraged to both wrap their constituent subsystems' `Error` types in their own `Error` types, as well as provide conversions from them via a `From` trait implementation. + +* Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. + +* **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. + +* If a runtime panic is desired, it **must have an appropriate error message**. + +### Logging + +* Log messages should be informative and as context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. + +* **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). + +* Use **structured logging** whenever you find yourself logging multiple data with a format string. + +* Use `trace!()` and `test_debug!()` liberally. They only run in tests. + +* Use `debug!()` for information that is relevant for diagnosing problems at runtime. This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. + +* Use `info!()` sparingly. + +* Use `warn!()` or `error!()` only when there really is a problem. + +### Consensus-Critical Code + +A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. 
If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. + +* **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. + +* **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). + +A non-exhaustive list of examples of consensus-critical changes include: + +* Adding or changing block, microblock, or transaction wire formats +* Changing the criteria under which a burnchain operation will be accepted by the node +* Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs +* Changing the order in which data gets stored in the above +* Adding, changing, or removing Clarity functions +* Changing the cost of a Clarity function +* Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. + +### Testing + +* **Unit tests should focus on the business logic with mocked data**. To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. + +* **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. + +* If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. 
+ +* When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** + +* **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. + +* Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. From 753aa1a2244acf4715fc2530eac90f763c3551b6 Mon Sep 17 00:00:00 2001 From: CharlieC3 <2747302+CharlieC3@users.noreply.github.com> Date: Tue, 14 Mar 2023 10:11:20 -0400 Subject: [PATCH 023/158] chore: update default seed nodes and example config files --- docs/profiling.md | 2 +- testnet/stacks-node/conf/mainnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-miner-conf.toml | 2 +- testnet/stacks-node/conf/mainnet-mockminer-conf.toml | 2 +- testnet/stacks-node/conf/testnet-follower-conf.toml | 2 +- testnet/stacks-node/conf/testnet-miner-conf.toml | 2 +- testnet/stacks-node/src/config.rs | 9 ++------- 7 files changed, 8 insertions(+), 13 deletions(-) diff --git a/docs/profiling.md b/docs/profiling.md index c35ca532af..35bbaf2f18 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -27,7 +27,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config=testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: 
ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, 
username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! 
``` diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index f6b283e38e..9f992a7e2c 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -2,7 +2,7 @@ # working_dir = "/dir/to/save/chainstate" rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 1769178740..319328b677 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -5,7 +5,7 @@ p2p_bind = "0.0.0.0:20444" seed = "" local_peer_seed = "" miner = true -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 9283a6ca95..37381a60af 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -4,7 +4,7 @@ rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner 
= true mock_mining = true -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index e1563d333a..6872666a2c 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -2,7 +2,7 @@ # working_dir = "/dir/to/save/chainstate" rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@testnet.stacks.co:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444" wait_time_for_microblocks = 10000 [burnchain] diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 3b1b0013e3..379cbd3822 100644 --- a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -5,7 +5,7 @@ p2p_bind = "0.0.0.0:20444" seed = "" local_peer_seed = "" miner = true -bootstrap_node = "047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@testnet.stacks.co:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444" wait_time_for_microblocks = 10000 [burnchain] diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index a3dbfcd91f..6cb3ae2743 
100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -206,7 +206,7 @@ impl ConfigFile { }; let node = NodeConfigFile { - bootstrap_node: Some("047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@xenon.blockstack.org:20444".to_string()), + bootstrap_node: Some("029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444".to_string()), miner: Some(false), ..NodeConfigFile::default() }; @@ -250,13 +250,8 @@ impl ConfigFile { ..BurnchainConfigFile::default() }; - let bootstrap_nodes = [ - "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444", - "02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444", - "03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444"].join(","); - let node = NodeConfigFile { - bootstrap_node: Some(bootstrap_nodes), + bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444".to_string()), miner: Some(false), ..NodeConfigFile::default() }; From ffccbd767db396f14de79d91d3582a00b42e1211 Mon Sep 17 00:00:00 2001 From: Scott A <119700554+fpbgg@users.noreply.github.com> Date: Tue, 14 Mar 2023 13:03:50 -0400 Subject: [PATCH 024/158] fix(deps): only depend on one version of secp256k1 --- Cargo.lock | 340 ++++++++++++++++++++------------------- Cargo.toml | 2 +- stacks-common/Cargo.toml | 2 +- 3 files changed, 179 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b20f432762..df02fb1ea3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,12 +216,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = 
"fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] @@ -348,9 +347,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array 0.14.6", ] @@ -402,7 +401,7 @@ dependencies = [ "rstest", "rstest_reuse", "rusqlite", - "secp256k1 0.24.3", + "secp256k1", "serde", "serde_derive", "serde_json", @@ -481,9 +480,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -656,9 +655,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -666,9 +665,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -677,22 +676,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if 1.0.0", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", ] @@ -719,9 +718,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ "csv-core", "itoa", @@ -773,9 +772,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -785,9 +784,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -800,15 +799,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -839,7 +838,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", ] @@ -898,6 +897,27 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -976,9 +996,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" dependencies = [ "futures-core", "futures-sink", @@ -986,15 +1006,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" [[package]] name = "futures-io" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" [[package]] name = "futures-lite" @@ -1013,21 +1033,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" [[package]] name = "futures-task" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" +checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" [[package]] name = "futures-util" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" dependencies = [ "futures-core", "futures-io", @@ -1110,9 +1130,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -1278,9 +1298,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.24" +version = "0.14.25" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -1381,6 +1401,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "iovec" version = "0.1.4" @@ -1407,9 +1437,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" @@ -1456,9 +1486,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" @@ -1500,6 +1530,12 @@ dependencies = [ "cc", ] +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "log" version = "0.4.17" @@ -1527,9 +1563,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = 
"d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] @@ -1729,9 +1765,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.5" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028accff104c4e513bad663bbcd2ad7cfd5304144404c31ed0a77ac103d00660" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -1811,16 +1847,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.5.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ "autocfg", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.45.0", ] [[package]] @@ -1872,9 +1910,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -1916,9 +1954,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.23" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1996,9 +2034,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" 
+version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -2006,9 +2044,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -2053,15 +2091,6 @@ version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "reqwest" version = "0.11.14" @@ -2202,7 +2231,21 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.36.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", ] [[package]] @@ -2237,15 +2280,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = 
"4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safemem" @@ -2276,9 +2319,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -2290,35 +2333,16 @@ dependencies = [ "untrusted", ] -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys 0.4.2", - "serde", -] - [[package]] name = "secp256k1" version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ - "secp256k1-sys 0.6.1", + "secp256k1-sys", "serde", ] -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - [[package]] name = "secp256k1-sys" version = "0.6.1" @@ -2348,9 +2372,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" 
[[package]] name = "semver-parser" @@ -2369,9 +2393,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -2388,9 +2412,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", @@ -2399,9 +2423,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -2421,9 +2445,9 @@ dependencies = [ [[package]] name = "serde_stacker" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35f73df5c3072392d6a2abb8588d06db7f57b83dc95d5bbb96da71cd8468fcfd" +checksum = "2f5557f4c1103cecd0e639a17ab22d670b89912d8a506589ee627bf738a15a5d" dependencies = [ "serde", "stacker", @@ -2582,7 +2606,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.19", + "time 0.3.20", ] [[package]] @@ -2595,7 +2619,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.19", + "time 0.3.20", ] [[package]] @@ -2606,9 +2630,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", @@ -2649,7 +2673,7 @@ dependencies = [ "rstest", "rstest_reuse", "rusqlite", - "secp256k1 0.21.3", + "secp256k1", "serde", "serde_derive", "serde_json", @@ -2773,9 +2797,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.108" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56e159d99e6c2b93995d171050271edb50ecc5288fbc7cc17de8fdce4e58c14" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2784,16 +2808,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", + "rustix", + "windows-sys 0.42.0", ] [[package]] @@ -2827,18 +2850,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -2883,16 +2906,16 @@ 
dependencies = [ [[package]] name = "time" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53250a3b3fed8ff8fd988587d8925d26a83ac3845d9e03b220b37f34c2b8d6c2" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "libc", "num_threads", "serde", "time-core", - "time-macros 0.2.7", + "time-macros 0.2.8", ] [[package]] @@ -2913,9 +2936,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a460aeb8de6dcb0f381e1ee05f1cd56fcf5a5f6eb8187ff3d8f0b11078d38b7c" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -2960,9 +2983,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -2972,7 +2995,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "socket2", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -3116,15 +3139,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = 
"e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -3368,15 +3391,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "winapi" version = "0.2.8" @@ -3446,9 +3460,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -3461,45 +3475,45 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "winreg" diff --git a/Cargo.toml b/Cargo.toml index 4a68b9056e..b930bacb78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,7 +71,7 @@ version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] -version = "0.24.2" +version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index a962386b5d..19cd58172e 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -37,7 +37,7 @@ version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] -version = "0.21.0" +version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] From 91173a23cd76f71de97e32d4a66b3d8b050ce015 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 14 Mar 2023 14:09:09 -0400 Subject: [PATCH 025/158] fix: don't panic if a bloom counter underflows --- 
src/util_lib/bloom.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/util_lib/bloom.rs b/src/util_lib/bloom.rs index d77d476d69..e3633288f0 100644 --- a/src/util_lib/bloom.rs +++ b/src/util_lib/bloom.rs @@ -365,15 +365,15 @@ impl BloomCounter { let sql = format!("CREATE TABLE IF NOT EXISTS {}(counts BLOB NOT NULL, num_bins INTEGER NOT NULL, num_hashes INTEGER NOT NULL, hasher BLOB NOT NULL);", table_name); tx.execute(&sql, NO_PARAMS).map_err(db_error::SqliteError)?; - let (num_bits, num_hashes) = bloom_hash_count(error_rate, max_items); - let counts_vec = vec![0u8; (num_bits * 4) as usize]; + let (num_bins, num_hashes) = bloom_hash_count(error_rate, max_items); + let counts_vec = vec![0u8; (num_bins * 4) as usize]; let hasher_vec = hasher.serialize_to_vec(); let sql = format!( "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", table_name ); - let args: &[&dyn ToSql] = &[&counts_vec, &num_bits, &num_hashes, &hasher_vec]; + let args: &[&dyn ToSql] = &[&counts_vec, &num_bins, &num_hashes, &hasher_vec]; tx.execute(&sql, args).map_err(db_error::SqliteError)?; @@ -384,7 +384,7 @@ impl BloomCounter { Ok(BloomCounter { hasher, table_name: table_name.to_string(), - num_bins: num_bits, + num_bins: num_bins, num_hashes, counts_rowid: counts_rowid as u32, }) @@ -533,8 +533,6 @@ impl BloomCounter { let new_bin = bin - 1; BloomCounter::::set_counts_bin(&mut fd, slot, new_bin); count = cmp::min(new_bin, count); - } else { - panic!("BUG: item is present in the bloom counter, but has a zero count (i = {}, slot = {})", i, slot); } } From f6a48d02bfae32f0ea3eded5f31b5fba81112536 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 14 Mar 2023 08:00:54 -0500 Subject: [PATCH 026/158] fix: reduce chain-liveness poll frequency --- testnet/stacks-node/src/config.rs | 10 ++++++++++ testnet/stacks-node/src/run_loop/neon.rs | 8 +++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git 
a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 6cb3ae2743..6b3f6df012 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -665,6 +665,9 @@ impl Config { // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. fault_injection_hide_blocks: false, + chain_liveness_poll_time_secs: node + .chain_liveness_poll_time_secs + .unwrap_or(default_node_config.chain_liveness_poll_time_secs), }; (node_config, node.bootstrap_node, node.deny_nodes) } @@ -1450,6 +1453,9 @@ pub struct NodeConfig { // fault injection for hiding blocks. // not part of the config file. pub fault_injection_hide_blocks: bool, + /// At most, how often should the chain-liveness thread + /// wake up the chains-coordinator. Defaults to 300s (5 min). + pub chain_liveness_poll_time_secs: u64, } #[derive(Clone, Debug)] @@ -1727,6 +1733,7 @@ impl NodeConfig { always_use_affirmation_maps: false, require_affirmed_anchor_blocks: true, fault_injection_hide_blocks: false, + chain_liveness_poll_time_secs: 300, } } @@ -1928,6 +1935,9 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + /// At most, how often should the chain-liveness thread + /// wake up the chains-coordinator. Defaults to 300s (5 min). 
+ pub chain_liveness_poll_time_secs: Option, } #[derive(Clone, Deserialize, Debug)] diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 7c5d387124..47b5df31ce 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -629,7 +629,7 @@ impl RunLoop { last_stacks_pox_reorg_recover_time: &mut u128, ) { let delay = cmp::max( - 1, + config.node.chain_liveness_poll_time_secs, cmp::max( config.miner.first_attempt_time_ms, config.miner.subsequent_attempt_time_ms, @@ -724,7 +724,9 @@ impl RunLoop { &stacks_tip_affirmation_map, &heaviest_affirmation_map ); - // do it anyway since it's harmless + // announce a new stacks block to force the chains coordinator + // to wake up anyways. this isn't free, so we have to make sure + // the chain-liveness thread doesn't wake up too often globals.coord().announce_new_stacks_block(); } @@ -747,7 +749,7 @@ impl RunLoop { last_announce_time: &mut u128, ) { let delay = cmp::max( - 1, + config.node.chain_liveness_poll_time_secs, cmp::max( config.miner.first_attempt_time_ms, config.miner.subsequent_attempt_time_ms, From 6ccadda4ad3705206369f6308362dae60e6f99ea Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 14 Mar 2023 10:18:09 -0500 Subject: [PATCH 027/158] chore: update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cafbdd3669..325b6ed7a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Handle the case where a bitcoin node returns zero headers (#3588) - The default value for `always_use_affirmation_maps` is now set to `false`, instead of `true`. This was preventing testnet nodes from reaching the chain -tip with the default configuration. + tip with the default configuration. 
+- Reduce default poll time of the `chain-liveness` thread which reduces the + possibility that a miner thread will get interrupted (#3610). ## [2.1] From d51ed0c8a970759ef023aa301706aeda5e6e8519 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9?= <30682875+jo-tm@users.noreply.github.com> Date: Wed, 15 Mar 2023 17:00:07 -0300 Subject: [PATCH 028/158] Update README testnet command --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9e70210e94..f2286d883d 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,8 @@ $ cargo nextest run You can observe the state machine in action locally by running: ```bash -$ cargo stacks-node start --config=./testnet/stacks-node/conf/testnet-follower-conf.toml +$ cd testnet/stacks-node +$ cargo stacks-node start --config=./conf/testnet-follower-conf.toml ``` _On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ From 58addb46f24f038cb227e841a9738debc70d9b9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9?= <30682875+jo-tm@users.noreply.github.com> Date: Fri, 17 Mar 2023 18:27:50 -0300 Subject: [PATCH 029/158] Added --bin and -- not working ow. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f2286d883d..06fca3166c 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ You can observe the state machine in action locally by running: ```bash $ cd testnet/stacks-node -$ cargo stacks-node start --config=./conf/testnet-follower-conf.toml +$ cargo run --bin stacks-node -- start --config=./conf/testnet-follower-conf.toml ``` _On Windows, many tests will fail if the line endings aren't `LF`. 
Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._ From ac35cde2c3106b0abf03e1378db317ca14ad8eca Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 20 Mar 2023 10:21:31 -0500 Subject: [PATCH 030/158] fix: incorporate unlocks in mempool admitter, #3623 --- src/chainstate/stacks/db/blocks.rs | 5 +- .../stacks/tests/block_construction.rs | 27 +++ src/core/mempool.rs | 21 +- src/main.rs | 1 + src/net/p2p.rs | 1 + src/net/relay.rs | 2 + src/net/rpc.rs | 1 + testnet/stacks-node/src/neon_node.rs | 3 + testnet/stacks-node/src/tenure.rs | 14 ++ .../stacks-node/src/tests/bitcoin_regtest.rs | 12 +- testnet/stacks-node/src/tests/integrations.rs | 32 +++ testnet/stacks-node/src/tests/mempool.rs | 215 +++++++++++++++--- testnet/stacks-node/src/tests/mod.rs | 20 +- 13 files changed, 310 insertions(+), 44 deletions(-) diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index bafeb503b8..f7acd84371 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -6975,6 +6975,7 @@ impl StacksChainState { /// unconfirmed microblock stream trailing off of it. 
pub fn will_admit_mempool_tx( &mut self, + burn_state_db: &dyn BurnStateDB, current_consensus_hash: &ConsensusHash, current_block: &BlockHeaderHash, tx: &StacksTransaction, @@ -7019,7 +7020,7 @@ impl StacksChainState { let current_tip = StacksChainState::get_parent_index_block(current_consensus_hash, current_block); - let res = match self.with_read_only_clarity_tx(&NULL_BURN_STATE_DB, ¤t_tip, |conn| { + let res = match self.with_read_only_clarity_tx(burn_state_db, ¤t_tip, |conn| { StacksChainState::can_include_tx(conn, &conf, has_microblock_pubk, tx, tx_size) }) { Some(r) => r, @@ -7039,7 +7040,7 @@ impl StacksChainState { { debug!("Transaction {} is unminable in the confirmed chain tip due to nonce {} != {}; trying the unconfirmed chain tip", &tx.txid(), mismatch_error.expected, mismatch_error.actual); - self.with_read_only_unconfirmed_clarity_tx(&NULL_BURN_STATE_DB, |conn| { + self.with_read_only_unconfirmed_clarity_tx(burn_state_db, |conn| { StacksChainState::can_include_tx( conn, &conf, diff --git a/src/chainstate/stacks/tests/block_construction.rs b/src/chainstate/stacks/tests/block_construction.rs index cdd7e3a208..5ba91c8046 100644 --- a/src/chainstate/stacks/tests/block_construction.rs +++ b/src/chainstate/stacks/tests/block_construction.rs @@ -248,6 +248,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -382,6 +383,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -516,6 +518,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -541,6 +544,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1230,6 +1234,7 @@ fn 
test_build_anchored_blocks_incrementing_nonces() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &tx, @@ -1430,6 +1435,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1451,6 +1457,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1471,6 +1478,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1617,6 +1625,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1771,6 +1780,7 @@ fn test_build_anchored_blocks_empty_chaintips() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1901,6 +1911,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, contract_tx_bytes, @@ -1928,6 +1939,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, contract_tx_bytes, @@ -2262,6 +2274,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2290,6 +2303,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2326,6 +2340,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2354,6 +2369,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, 
&parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2599,6 +2615,7 @@ fn test_build_microblock_stream_forks() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -2925,6 +2942,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -3000,6 +3018,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -3032,6 +3051,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -3989,6 +4009,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx, @@ -4015,6 +4036,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_spends_too_much_tx, @@ -4063,6 +4085,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &spend_too_much, @@ -4113,6 +4136,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &runtime_checkerror_problematic, @@ -4161,6 +4185,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &runtime_checkerror_problematic, @@ -4296,6 +4321,7 @@ fn test_fee_order_mismatch_nonce_order() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer0, @@ -4308,6 +4334,7 @@ fn test_fee_order_mismatch_nonce_order() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer1, diff --git a/src/core/mempool.rs b/src/core/mempool.rs index 1d340d59a3..7f3c195d21 100644 --- a/src/core/mempool.rs 
+++ b/src/core/mempool.rs @@ -38,6 +38,7 @@ use rusqlite::NO_PARAMS; use siphasher::sip::SipHasher; // this is SipHash-2-4 use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::{ db::blocks::MemPoolRejection, db::ClarityTx, db::StacksChainState, db::TxStreamData, @@ -174,10 +175,17 @@ impl MemPoolAdmitter { pub fn will_admit_tx( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, tx: &StacksTransaction, tx_size: u64, ) -> Result<(), MemPoolRejection> { - chainstate.will_admit_mempool_tx(&self.cur_consensus_hash, &self.cur_block, tx, tx_size) + chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), + &self.cur_consensus_hash, + &self.cur_block, + tx, + tx_size, + ) } } @@ -1973,6 +1981,7 @@ impl MemPoolDB { fn tx_submit( mempool_tx: &mut MemPoolTx, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2027,7 +2036,9 @@ impl MemPoolDB { mempool_tx .admitter .set_block(&block_hash, (*consensus_hash).clone()); - mempool_tx.admitter.will_admit_tx(chainstate, tx, len)?; + mempool_tx + .admitter + .will_admit_tx(chainstate, sortdb, tx, len)?; } MemPoolDB::try_add_tx( @@ -2064,6 +2075,7 @@ impl MemPoolDB { pub fn submit( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2101,6 +2113,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, tx, @@ -2116,6 +2129,7 @@ impl MemPoolDB { pub fn miner_submit( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2129,6 +2143,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, tx, @@ -2146,6 
+2161,7 @@ impl MemPoolDB { pub fn submit_raw( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx_bytes: Vec, @@ -2187,6 +2203,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, &tx, diff --git a/src/main.rs b/src/main.rs index 65c73c71ac..fcf8abb7e2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1433,6 +1433,7 @@ simulating a miner. } let result = mempool_db.submit( &mut chain_state, + &sort_db, &stacks_block.consensus_hash, &stacks_block.anchored_block_hash, &raw_tx, diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 62070ea5a8..141f3b6071 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -5246,6 +5246,7 @@ impl PeerNetwork { if let Err(e) = mempool.submit( chainstate, + sortdb, consensus_hash, block_hash, &tx, diff --git a/src/net/relay.rs b/src/net/relay.rs index ddacfd38eb..fa699fe6f1 100644 --- a/src/net/relay.rs +++ b/src/net/relay.rs @@ -5824,6 +5824,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -5873,6 +5874,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 41883af802..fdad5b699d 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -2093,6 +2093,7 @@ impl ConversationHttp { } else { match mempool.submit( chainstate, + sortdb, &consensus_hash, &block_hash, &tx, diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 
dce07cc854..87d18dc4d0 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1594,6 +1594,7 @@ impl BlockMinerThread { fn load_and_vet_parent_microblocks( &mut self, chain_state: &mut StacksChainState, + sortdb: &SortitionDB, mem_pool: &mut MemPoolDB, parent_block_info: &mut ParentStacksBlockInfo, ) -> Option> { @@ -1662,6 +1663,7 @@ impl BlockMinerThread { // anchored block. if let Err(e) = mem_pool.miner_submit( chain_state, + sortdb, &parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, @@ -1889,6 +1891,7 @@ impl BlockMinerThread { // target it to the microblock tail in parent_block_info let microblocks_opt = self.load_and_vet_parent_microblocks( &mut chain_state, + &burn_db, &mut mem_pool, &mut parent_block_info, ); diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index a7a5c2cba2..77b1c88a80 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -5,6 +5,10 @@ use super::{BurnchainTip, Config}; use std::thread; use std::time::{Duration, Instant}; +#[cfg(test)] +use stacks::burnchains::PoxConstants; +#[cfg(test)] +use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::db::sortdb::SortitionDBConn; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ @@ -123,4 +127,14 @@ impl<'a> Tenure { .unwrap(); chain_state } + + #[cfg(test)] + pub fn open_fake_sortdb(&self) -> SortitionDB { + SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + PoxConstants::testnet_default(), + ) + .unwrap() + } } diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 267908c932..2479a403cd 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -372,6 +372,8 @@ fn bitcoind_integration(segwit_flag: bool) { // Use tenure's hook for 
submitting transactions run_loop.callbacks.on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); + match round { 1 => { // On round 1, publish the KV contract @@ -391,7 +393,7 @@ fn bitcoind_integration(segwit_flag: bool) { // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 2 => { @@ -400,7 +402,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 3 => { @@ -409,7 +411,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let set_foo_bar = 
"8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 4 => { @@ -418,7 +420,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000003000000000000000a010046c2c1c345231443fef9a1f64fccfef3e1deacc342b2ab5f97612bb3742aa799038b20aea456789aca6b883e52f84a31adfee0bc2079b740464877af8f2f87d2030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 5 => { @@ -427,7 +429,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let transfer_1000_stx = 
"80800000000400b71a091b4b8b7661a661c620966ab6573bc2dcd30000000000000000000000000000000a0000393810832bacd44cfc4024980876135de6b95429bdb610d5ce96a92c9ee9bfd81ec77ea0f1748c8515fc9a1589e51d8b92bf028e3e84ade1249682c05271d5b803020000000000051a525b8a36ef8a73548cd0940c248d3b71ecf4a45100000000000003e800000000000000000000000000000000000000000000000000000000000000000000"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, _ => {} diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 8366328351..43834e40e2 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -205,6 +205,7 @@ fn integration_test_get_info() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let principal_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -221,6 +222,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -234,6 +236,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -247,6 +250,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -281,6 +285,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -304,6 +309,7 @@ fn integration_test_get_info() { .mem_pool 
.submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx, @@ -325,6 +331,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx_xfer, @@ -1095,6 +1102,7 @@ fn contract_stx_transfer() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -1117,6 +1125,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1132,6 +1141,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -1152,6 +1162,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1173,6 +1184,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx, @@ -1197,6 +1209,7 @@ fn contract_stx_transfer() { .mem_pool .submit( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, &xfer_to_contract, @@ -1215,6 +1228,7 @@ fn contract_stx_transfer() { .mem_pool .submit( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, &xfer_to_contract, @@ -1413,6 +1427,7 @@ fn mine_transactions_out_of_order() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let header_hash = chain_tip.block.block_hash(); @@ -1433,6 +1448,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1447,6 +1463,7 @@ fn 
mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -1461,6 +1478,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1475,6 +1493,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1564,6 +1583,7 @@ fn mine_contract_twice() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); if round == 1 { @@ -1578,6 +1598,7 @@ fn mine_contract_twice() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1649,6 +1670,7 @@ fn bad_contract_tx_rollback() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -1674,6 +1696,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1692,6 +1715,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1706,6 +1730,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1720,6 +1745,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1734,6 +1760,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, 
publish_tx, @@ -1960,6 +1987,7 @@ fn block_limit_runtime_test() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( @@ -1985,6 +2013,7 @@ fn block_limit_runtime_test() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -2014,6 +2043,7 @@ fn block_limit_runtime_test() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, tx, @@ -2087,6 +2117,7 @@ fn mempool_errors() { .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; @@ -2100,6 +2131,7 @@ fn mempool_errors() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index c20560e036..95a8a78e07 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -16,6 +16,7 @@ use stacks::cost_estimates::UnitEstimator; use stacks::net::Error as NetError; use stacks::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks::util::{hash::*, secp256k1::*}; +use stacks::vm::database::NULL_BURN_STATE_DB; use stacks::vm::{ representations::ContractName, types::PrincipalData, types::QualifiedContractIdentifier, types::StandardPrincipalData, Value, @@ -108,6 +109,7 @@ fn mempool_setup_chainstate() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let header_hash = chain_tip.block.block_hash(); let consensus_hash = 
chain_tip.metadata.consensus_hash; + let sortdb = tenure.open_fake_sortdb(); if round == 1 { eprintln!("Tenure in 1 started!"); @@ -118,6 +120,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx1, @@ -132,6 +135,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx2, @@ -151,6 +155,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx3, @@ -170,6 +175,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx4, @@ -189,6 +195,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx4, @@ -230,7 +237,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let tx_bytes = make_contract_call( @@ -245,14 +258,26 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &other_addr, 1000); let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + 
tx_bytes.len() as u64, + ) .unwrap(); // bad signature @@ -260,7 +285,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -296,7 +327,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadAddressVersionByte = e { @@ -319,7 +356,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadAddressVersionByte = e { true @@ -332,7 +375,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { @@ -346,7 +395,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, 
&tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadNonces(_) = e { @@ -360,7 +415,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { @@ -375,7 +436,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { @@ -392,7 +459,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadAddressVersionByte = e { @@ -419,7 +492,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) 
.unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadTransactionVersion = e { @@ -433,7 +512,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { @@ -447,7 +532,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { @@ -460,7 +551,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e { @@ -481,7 +578,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoSuchContract = e { @@ -502,7 +605,13 @@ fn 
mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoSuchPublicFunction = e { @@ -523,7 +632,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { @@ -537,7 +652,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e { @@ -566,7 +687,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -597,7 +724,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() 
as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::InvalidMicroblocks = e { @@ -629,7 +762,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -644,7 +783,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e { @@ -696,7 +841,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let contract_id = QualifiedContractIdentifier::new( @@ -717,7 +868,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let contract_id = QualifiedContractIdentifier::new( @@ -738,7 +895,13 @@ fn mempool_setup_chainstate() { let tx = 
StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { true diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 1a24d3c93f..3ee513c24c 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -529,11 +529,12 @@ fn should_succeed_mining_valid_txs() { let consensus_hash = chain_tip.metadata.consensus_hash; let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); match round { 1 => { // On round 1, publish the KV contract - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -542,7 +543,7 @@ fn should_succeed_mining_valid_txs() { // On round 2, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, 
&header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -551,7 +552,7 @@ fn should_succeed_mining_valid_txs() { // On round 3, publish a "set:foo=bar" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -560,7 +561,7 @@ fn should_succeed_mining_valid_txs() { // On round 4, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 3 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000003000000000000000a010046c2c1c345231443fef9a1f64fccfef3e1deacc342b2ab5f97612bb3742aa799038b20aea456789aca6b883e52f84a31adfee0bc2079b740464877af8f2f87d2030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), 
&ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -569,7 +570,7 @@ fn should_succeed_mining_valid_txs() { // On round 5, publish a stacks transaction // ./blockstack-cli --testnet token-transfer b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01 10 0 ST195Q2HPXY576N4CT2A0R94D7DRYSX54A5X3YZTH 1000 let transfer_1000_stx = "80800000000400b71a091b4b8b7661a661c620966ab6573bc2dcd30000000000000000000000000000000a0000393810832bacd44cfc4024980876135de6b95429bdb610d5ce96a92c9ee9bfd81ec77ea0f1748c8515fc9a1589e51d8b92bf028e3e84ade1249682c05271d5b803020000000000051a525b8a36ef8a73548cd0940c248d3b71ecf4a45100000000000003e800000000000000000000000000000000000000000000000000000000000000000000"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -812,13 +813,14 @@ fn should_succeed_handling_malformed_and_valid_txs() { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); match round { 1 => { // On round 1, publish the KV contract let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let publish_contract = make_contract_publish(&contract_sk, 0, 10, "store", STORE_CONTRACT); - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,publish_contract, + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,publish_contract, &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -828,7 +830,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Will not be mined // ./blockstack-cli contract-call 
043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "0000000001040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0101ef2b00e7e55ee5cb7684d5313c7c49680c97e60cb29f0166798e6ffabd984a030cf0a7b919bcf5fa052efd5d9efd96b927213cb3af1cfb8d9c5a0be0fccda64d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -838,7 +840,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Will not be mined // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a010093f733efcebe2b239bb22e2e1ed25612547403af66b29282ed1f6fdfbbbf8f7f6ef107256d07947cbb72e165d723af99c447d6e25e7fbb6a92fd9a51c5ef7ee9030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -847,7 +849,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // On round 4, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 
043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); From 2cd3dd093a7bbf14eafea37743a7a2a7999939cd Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 20 Mar 2023 12:11:04 -0500 Subject: [PATCH 031/158] test: add regression test for mempool unlock handling #3623 --- .../stacks/tests/block_construction.rs | 235 ++++++++++++++++++ 1 file changed, 235 insertions(+) diff --git a/src/chainstate/stacks/tests/block_construction.rs b/src/chainstate/stacks/tests/block_construction.rs index 5ba91c8046..e85f2cdfdc 100644 --- a/src/chainstate/stacks/tests/block_construction.rs +++ b/src/chainstate/stacks/tests/block_construction.rs @@ -28,6 +28,7 @@ use std::fs; use std::io; use std::path::{Path, PathBuf}; +use clarity::vm::database::ClarityDatabase; use rand::seq::SliceRandom; use rand::thread_rng; use rand::Rng; @@ -4238,6 +4239,240 @@ fn test_is_tx_problematic() { } } +#[test] +fn mempool_incorporate_pox_unlocks() { + let mut initial_balances = vec![]; + let total_balance = 10_000_000_000; + let pk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap(); + 
initial_balances.push((addr.to_account_principal(), total_balance)); + let principal = PrincipalData::from(addr.clone()); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); + peer_config.initial_balances = initial_balances; + peer_config.epochs = Some(vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: 36, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 36, + end_height: i64::MAX as u64, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]); + peer_config.burnchain.pox_constants.v1_unlock_height = + peer_config.epochs.as_ref().unwrap()[1].end_height as u32 + 1; + let pox_constants = peer_config.burnchain.pox_constants.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let first_block_height = peer.sortdb.as_ref().unwrap().first_block_height; + let first_pox_cycle = pox_constants + .block_height_to_reward_cycle(first_block_height, first_stacks_block_height) + .unwrap(); + let active_pox_cycle_start = + pox_constants.reward_cycle_to_block_height(first_block_height, first_pox_cycle + 1); + let lockup_end = pox_constants.v1_unlock_height as u64; + + // test for two PoX cycles + let num_blocks = 3 + lockup_end - first_stacks_block_height; + info!( + "Starting test"; + "num_blocks" => num_blocks, + "first_stacks_block_height" => first_stacks_block_height, + "active_pox_cycle_start" => active_pox_cycle_start, + "active_pox_cycle_end" => lockup_end, + 
"first_block_height" => first_block_height, + ); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let mut last_block = None; + for tenure_id in 0..num_blocks { + // send transactions to the mempool + let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + + let (burn_ops, stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_height = parent_tip.burn_header_height; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let coinbase_tx = make_coinbase(miner, tenure_id as usize); + + let mut mempool = + MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut expected_txids = vec![]; + expected_txids.push(coinbase_tx.txid()); + + // this will be the height of the block that includes this new tenure + let my_height = first_stacks_block_height + 1 + tenure_id; + + let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), &parent_tip.index_block_hash(), |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let burn_block_height = db.get_current_burnchain_block_height() as u64; + let v1_unlock_height = db.get_v1_unlock_height(); 
+ let balance = db.get_account_stx_balance(&principal); + info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height) + }) + }).unwrap(); + + if tenure_id <= 1 { + assert_eq!(available_balance, total_balance as u128, "Failed at tenure_id={}", tenure_id); + } else if my_height <= lockup_end + 1 { + assert_eq!(available_balance, 0, "Failed at tenure_id={}", tenure_id); + } else if my_height == lockup_end + 2 { + assert_eq!(available_balance, total_balance as u128 - 10_000, "Failed at tenure_id={}", tenure_id); + } else { + assert_eq!(available_balance, 0, "Failed at tenure_id={}", tenure_id); + } + + if tenure_id == 1 { + let stack_stx = make_user_contract_call( + &pk, + 0, + 10_000, + &StacksAddress::burn_address(false), + "pox", + "stack-stx", + vec![ + Value::UInt(total_balance as u128 - 10_000), + Value::Tuple( + TupleData::from_data(vec![ + ("version".into(), Value::buff_from(vec![0x00]).unwrap()), + ("hashbytes".into(), Value::buff_from(vec![0; 20]).unwrap()), + ]).unwrap(), + ), + Value::UInt(my_height as u128), + Value::UInt(10) + ], + ); + mempool + .submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &stack_stx, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch2_05, + ) + .unwrap(); + expected_txids.push(stack_stx.txid()); + } else if my_height == lockup_end + 2 { + let stx_transfer = make_user_stacks_transfer( + &pk, + 1, + 10_000, + &StacksAddress::burn_address(false).into(), + total_balance - 10_000 - 10_000, + ); + mempool + .submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &stx_transfer, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch2_05, + ) + .unwrap(); + expected_txids.push(stx_transfer.txid()); + } + + let anchored_block = StacksBlockBuilder::build_anchored_block( + chainstate, + &sortdb.index_conn(), + &mut mempool, + 
&parent_tip, + tip.total_burn, + vrf_proof, + Hash160([tenure_id as u8; 20]), + &coinbase_tx, + BlockBuilderSettings::limited(), + None, + ) + .unwrap(); + + // make sure the right txs get included + let txids : Vec<_> = anchored_block.0.txs.iter().map(|tx| tx.txid()).collect(); + assert_eq!(txids, expected_txids); + + (anchored_block.0, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + last_block = Some(StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + )); + } +} + #[test] /// Test the situation in which the nonce order of transactions from a user. That is, /// nonce 1 has a higher fee than nonce 0. From aa5f1674229eef4e6824913f2c23a73aa9cba4ef Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 20 Mar 2023 12:18:50 -0500 Subject: [PATCH 032/158] chore: add changelog entry --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 325b6ed7a2..872497c801 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.1.0.0.2] + +This software update is a hotfix to resolve improper unlock handling +in mempool admission. This release's chainstate directory is +compatible with chainstate directories from 2.1.0.0.2. + +### Fixed + +- Fix mempool admission logic's improper handling of PoX unlocks.
This would + cause users to get spurious `NotEnoughFunds` rejections when trying to submit + their transactions (#3623) + ## [2.1.0.0.1] ### Fixed From 4b7880dd11fe85cecaa9985ae7cbba751cd0594c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 21 Mar 2023 15:49:45 -0500 Subject: [PATCH 033/158] docs: update pr review docs with my comments --- DEVINFO.md | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/DEVINFO.md b/DEVINFO.md index 3ba00be410..0b22ea6941 100644 --- a/DEVINFO.md +++ b/DEVINFO.md @@ -1,8 +1,10 @@ # How to Create and Review PRs +This document describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. + ## Overview -Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. There is essentially zero room for consensus bugs. If you ship a consensus bug, that bug could not only have catastrophic consequences for users (i.e. they lose all their money), but also be intractable to fix, mitigate, or remove. This is because unlike nearly every other kind of networked software, **the state of the blockchain is what the users' computers say it is.** If you want to make changes, you _must_ get _user_ buy-in, and this is necessarily time-consuming and not at all guaranteed to succeed. +Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. 
This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). @@ -16,7 +18,9 @@ This document is formatted like a checklist. Each paragraph is one goal or acti The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. -Reviewers should **strive to complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. +Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. + +Reviewers should make use of Github's "pending comments" feature. 
This ensures that the review is "atomic": when the reviewer submits the review, all the comments are published at once. Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. @@ -24,11 +28,17 @@ Code reviews should be timely. A PR review should begin no more than **2 busine Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. +**As a reviewer, if you do not understand the PR's code or the potential consequences of the code, it is the submitter's responsibility to simplify the code, provide better documentation, or withdraw the PR.** + ## Submitter Expectations Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it in no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. -The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others.
A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going to far as to ask in advance if a particular person could be available for review. +The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. + +A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. + +Providing detailed answers to reviewer questions is often necessary as a submitter. In order to make this information accessible even after a PR has merged, submitters should strive to incorporate any clarifications into code comments. ## Submission Checklist @@ -40,10 +50,13 @@ A PR submission's text should **answer the following questions** for reviewers: * What do reviewers need to be familiar with in order to provide useful feedback? * What issue(s) are addressed by this PR? * What are some hints to understanding some of the more intricate or clever parts of the PR? +* Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? In addition, the PR submission should **answer the prompts of the Github template** we use for PRs. -The code itself should adhere to the following guidelines, which both submitters and reviewers should check: +The code itself should adhere to our coding guidelines, which both submitters and reviewers should check. 
+ +## Coding Guidelines ### Documentation From 18b3b860bc0a8fa72e37118cc402d7a86e15afa7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 22 Mar 2023 08:48:27 -0500 Subject: [PATCH 034/158] docs: merge CONTRIBUTORS.md and DEVINFO.md into docs/CONTRIBUTING.md --- CONTRIBUTORS.md | 359 ----------------------------- DEVINFO.md => docs/CONTRIBUTING.md | 358 ++++++++++++++++++++++++++-- docs/contributing.md | 31 --- 3 files changed, 342 insertions(+), 406 deletions(-) delete mode 100644 CONTRIBUTORS.md rename DEVINFO.md => docs/CONTRIBUTING.md (52%) delete mode 100644 docs/contributing.md diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md deleted file mode 100644 index 2d24c23a36..0000000000 --- a/CONTRIBUTORS.md +++ /dev/null @@ -1,359 +0,0 @@ -# Contributing to Blockstack Core - -Blockstack Core is open-source software written in Rust. Contributions -should adhere to the following best practices. - -You can find information on joining online community forums (Discord, mailing list etc.) in the [README](README.md). - -#### Table Of Contents - -[Code of Conduct](#code-of-conduct) - -[How Can I Contribute?](#how-can-i-contribute) -* [Development Workflow](#development-workflow) -* [Contributing Conventions](#contributing-conventions) - -[Style](#style) -* [Git Commit Messages](#git-commit-messages) -* [Rust Styleguide](#rust-styleguide) -* [Comments](#comments) - -[License Agreement](#licensing-and-contributor-license-agreement) - -# Code of Conduct - -This project and everyone participating in it is governed by this [Code of Conduct](CODE_OF_CONDUCT.md). - -# How Can I Contribute? -## Development Workflow - -- For typical development, branch off of the `develop` branch. -- For consensus breaking changes, branch off of the `next` branch. -- For hotfixes, branch off of `master`. - -### Documentation Updates - -- Any major changes should be added to the [CHANGELOG](CHANGELOG.md). -- Mention any required documentation changes in the description of your pull request. 
-- If adding an RPC endpoint, add an entry for the new endpoint to the OpenAPI spec `./docs/rpc/openapi.yaml`. -- If your code adds or modifies any major features (struct, trait, test, module, function, etc.), each should be documented according to our [style rules](#comments). - - To generate HTML documentation for the library, run `cargo doc --no-deps --open`. - - It's possible to check the percentage of code coverage by (a) switching to the nightly version of rust (can run `rustup default nightly`, and also might need to edit `rust-toolchain` file to say "nightly" instead of "stable"), and (b) running `RUSTDOCFLAGS='-Z unstable-options --show-coverage' cargo doc`. - -### Each file should include relevant unit tests - -Each Rust file should contain a `mod test {}` definition, in which unit tests -should be supplied for the file's methods. Unit tests should cover a maximal -amount of code paths. - -## Contributing Conventions - -### Simplicity of implementation - -The most important consideration when accepting or rejecting a contribution is -the simplicity (i.e. ease of understanding) of its implementation. -Contributions that are "clever" or introduce functionality beyond the scope of -the immediate problem they are meant to solve will be rejected. - -#### Type simplicity - -Simplicity of implementation includes simplicity of types. Type parameters -and associated types should only be used if there are at -least two possible implementations of those types. - -Lifetime parameters should only be introduced if the compiler cannot deduce them -on its own. - -### Builds with a stable Rust compiler -We use a recent, stable Rust compiler. Contributions should _not_ -require nightly Rust features to build and run. - -### Use built-in logging facilities - -Blockstack Core implements logging macros in `util::log`. If your code needs to -output data, it should use these macros _exclusively_ for doing so. 
The only -exception is code that is explicitly user-facing, such as help documentation. - -### Minimal dependencies - -Adding new package dependencies is very much discouraged. Exceptions will be -granted on a case-by-case basis, and only if deemed absolutely necessary. - -### Minimal global macros - -Adding new global macros is discouraged. Exceptions will only be given if -absolutely necessary. - -### Minimal compiler warnings - -Contributions should not trigger compiler warnings if possible, and should not -mask compiler warnings with macros. Common sources of compiler warnings that -will not be accepted include, but are not limited to: - -* unnecessary imports -* unused code -* variable naming conventions -* unhandled return types - -### Minimal `unsafe` code - -Contributions should not contain `unsafe` blocks if at all possible. - -### Error definitions - -Each module should include an `Error` enumeration in its `mod.rs` that encodes -errors specific to the module. All error code paths in the module should return -an `Err` type with one of the module's errors. - -# Style -## Git Commit Messages -Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). -The general format is as follows: -``` -[optional scope]: - -[optional body] -[optional footer(s)] -``` -Common types include build, ci, docs, fix, feat, test, refactor, etc. - -## Rust styleguide - -### Code block consistency - -Surrounding code blocks with `{` and `}` is encouraged, even when the enclosed -block is a single statement. Blocks in the same lexical scope must use -consistent conventions. For example, consider the following: - -``` -match foo { - 1..2 => { - // this is a single statement, but it is surrounded - // with { and } because the other blocks in the match - // statement need them. 
- Ok(true) - }, - 3..4 => { - error!("Bad value for foo"); - Err(Error::BadFoo) - }, - _ => { - // similarly, this block uses { } - Ok(true) - } -} - -// conversely, all of the arms of this match statement -// have one-statement blocks, so { and } can be elided. -match bar { - 1..2 => Some("abc"), - 3..4 => Some("def"), - _ => None -} -``` - -### Whitespace - -All contributions should use the same whitespacing as the rest of the project. -Moreover, Pull requests where a large number of changes only deal with whitespace will be -rejected. - -## Comments -Comments are very important for the readability and correctness of the codebase. The purpose of comments is: - -* Allow readers to understand the roles of components and functions without having to check how they are used. -* Allow readers to check the correctness of the code against the comments. -* Allow readers to follow tests. - -In the limit, if there are no comments, the problems that arise are: - -* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. -* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. -* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. - -### Comment Formatting - -Comments are to be formatted in typical `rust` style, specifically: - -- Use markdown to format comments. - -- Use the triple forward slash "///" for modules, structs, enums, traits and functions. Use double forward slash "//" for comments on individual lines of code. 
- -- Start with a high-level description of the function, adding more sentences with details if necessary. - -- When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: - - - ``` - # Errors - * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. - ``` - -### Content of Comments -The following kinds of things should have comments. - -#### Components -Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall -purpose of that component is. This is usually a concept, and not a formal contract. Include anything that is not obvious about this component. - -**Example:** - -```rust -/// The `ReadOnlyChecker` analyzes a contract to determine whether -/// there are any violations of read-only declarations. By a "violation" -/// we mean a function that is marked as "read only" but which tries -/// to modify chainstate. -pub struct ReadOnlyChecker<'a, 'b> { -``` - -This comment is considered positive because it explains the concept behind the class at a glance, so that the reader has some idea about what the methods will achieve, without reading each method declaration and comment. It also defines some terms that can be used in the comments on the method names. - -#### Functions - -The comments on a function should explain what the function does, without having to read it. Wherever practical, it should specify the contract of a function, such that a bug in the logic could be discovered by a discrepancy between contract and implementation, or such that a test could be written with only access to the function comment. - -Without being unnecessarily verbose, explain how the output is calculated -from the inputs. Explain the side effects. Explain any restrictions on the inputs. Explain failure -conditions, including when the function will panic, return an error -or return an empty value. 
- -**Example:** - -```rust -/// A contract that does not violate its read-only declarations is called -/// *read-only correct*. -impl<'a, 'b> ReadOnlyChecker<'a, 'b> { - /// Checks each top-level expression in `contract_analysis.expressions` - /// for read-only correctness. - /// - /// Returns successfully iff this function is read-only correct. - /// - /// # Errors - /// - /// - Returns CheckErrors::WriteAttemptedInReadOnly if there is a read-only - /// violation, i.e. if some function marked read-only attempts to modify - /// the chainstate. - pub fn run(&mut self, contract_analysis: &ContractAnalysis) -> CheckResult<()> -``` - -This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. - -#### Comments on Implementations of Virtual Methods - -Note that, if a function implements a virtual function on an interface, the comments should not -repeat what was specified on the interface declaration. The comment should only add information specific to that implementation. - -### Data Members -Each data member in a struct should have a comment describing what that member -is, and what it is used for. Such comments are usually brief but should -clear up any ambiguity that might result from having only the variable -name and type. - -**Example:** - -```rust -pub struct ReadOnlyChecker<'a, 'b> { - /// Mapping from function name to a boolean indicating whether - /// the function with that name is read-only. - /// This map contains all functions in the contract analyzed. - defined_functions: HashMap, -``` - -This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone. 
- -#### Tests - -Each test should have enough comments to help an unfamiliar reader understand: - -1. what is conceptually being tested -1. why a given answer is expected - -Sometimes this can be obvious without much comments, perhaps from the context, -or because the test is very simple. Often though, comments are necessary. - -**Example:** - -```rust -#[test] -#[ignore] -fn transaction_validation_integration_test() { - /// The purpose of this test is to check if the mempool admission checks - /// for the post tx endpoint are working as expected wrt the optional - /// `mempool_admission_check` query parameter. - /// - /// In this test, we are manually creating a microblock as well as - /// reloading the unconfirmed state of the chainstate, instead of relying - /// on `next_block_and_wait` to generate microblocks. We do this because - /// the unconfirmed state is not automatically being initialized - /// on the node, so attempting to validate any transactions against the - /// expected unconfirmed state fails. -``` - -This comment is considered positive because it explains the purpose of the test (checking the case of an optional parameter), it also guides the reader to understand the low-level details about why a microblock is created manually. - -### How Much to Comment - -Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. - -Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. 
- -#### Don't Restate the Function Names - -The contracts of functions should be implemented precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: - -* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** -* **the author should only state information that is new** - -So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! - -**Bad Example** - -``` -/// Appends a transaction to a block. -fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> -``` - -This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. - -**Good Example** - -``` -/// # Errors -/// -/// - BlockTooBigError: Is returned if adding `transaction` to `block` results -/// in a block size bigger than MAX_BLOCK_SIZE. -fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()> -``` - -This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. - -#### Do's and Dont's - -*Don't* over-comment by documenting things that are clear from the context. E.g.: - -- Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`. -- Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment. 
-- Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking. - -*Do* document things that are not clear, e.g.: - -- For a function called `process_block`, explain what it means to "process" a block. -- For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both. -- For a function called `run`, explain the steps involved in "running". -- For a function that takes arguments `peer1` and `peer2`, explain the difference between the two. -- For a function that takes an argument `height`, either explain in the comment what this is the *height of*. Alternatively, expand the variable name to remove the ambiguity. -- For a test, document what it is meant to test, and why the expected answers are, in fact, expected. - -### Changing Code Instead of Comments - -Keep in mind that better variable names can reduce the need for comments, e.g.: - -* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks -* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment - -# Licensing and contributor license agreement - -Blockstack Core is released under the terms of the GPL version 3. Contributions -that are not licensed under compatible terms will be rejected. Moreover, -contributions will not be accepted unless _all_ authors accept the project's -contributor license agreement. 
diff --git a/DEVINFO.md b/docs/CONTRIBUTING.md similarity index 52% rename from DEVINFO.md rename to docs/CONTRIBUTING.md index 0b22ea6941..2d1c4f504e 100644 --- a/DEVINFO.md +++ b/docs/CONTRIBUTING.md @@ -1,6 +1,58 @@ -# How to Create and Review PRs +# Contributing to the Stacks Blockchain -This document describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. +The Stacks blockchain is open-source software written in Rust. Contributions +should adhere to the following best practices. + +Blockchain software development requires a much higher degree of rigor +than most other kinds of software. This is because with blockchains, +**there is no roll-back** from a bad deployment. There is essentially +zero room for consensus bugs. If you ship a consensus bug, that bug +could not only have catastrophic consequences for users (i.e. they +lose all their money), but also be intractable to fix, mitigate, or +remove. This is because unlike nearly every other kind of networked +software, **the state of the blockchain is what the users' computers +say it is.** If you want to make changes, you _must_ get _user_ +buy-in, and this is necessarily time-consuming and not at all +guaranteed to succeed. + +You can find information on joining online community forums (Discord, mailing list etc.) in the [README](README.md). + +# Code of Conduct + +This project and everyone participating in it is governed by this [Code of Conduct](CODE_OF_CONDUCT.md). + +# How Can I Contribute? + +## Development Workflow + +- For typical development, branch off of the `develop` branch. +- For consensus breaking changes, branch off of the `next` branch. +- For hotfixes, branch off of `master`. 
+
+### Documentation Updates
+
+- Any major changes should be added to the [CHANGELOG](CHANGELOG.md).
+- Mention any required documentation changes in the description of your pull request.
+- If adding an RPC endpoint, add an entry for the new endpoint to the
+  OpenAPI spec `./docs/rpc/openapi.yaml`.
+- If your code adds or modifies any major features (struct, trait,
+  test, module, function, etc.), each should be documented according
+  to our [coding guidelines](#Coding-Guidelines).
+
+## Git Commit Messages
+Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/).
+The general format is as follows:
+```
+<type>[optional scope]: <description>
+
+[optional body]
+[optional footer(s)]
+```
+Common types include build, ci, docs, fix, feat, test, refactor, etc.
+
+# Creating and Reviewing PRs
+
+This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here.
 
@@ -54,11 +106,53 @@ A PR submission's text should **answer the following questions** for reviewers:
 
 In addition, the PR submission should **answer the prompts of the Github template** we use for PRs.
 
-The code itself should adhere to our coding guidelines, which both submitters and reviewers should check.
+The code itself should adhere to our coding guidelines and conventions, which both submitters and reviewers should check.
+
+# Coding Conventions
+
+### Simplicity of implementation
+
+The most important consideration when accepting or rejecting a contribution is
+the simplicity (i.e. ease of understanding) of its implementation.
+Contributions that are "clever" or introduce functionality beyond the scope of
+the immediate problem they are meant to solve will be rejected.
+ +#### Type simplicity + +Simplicity of implementation includes simplicity of types. Type parameters +and associated types should only be used if there are at +least two possible implementations of those types. + +Lifetime parameters should only be introduced if the compiler cannot deduce them +on its own. + +### Builds with a stable Rust compiler + +We use a recent, stable Rust compiler. Contributions should _not_ +require nightly Rust features to build and run. + +### Minimal dependencies + +Adding new package dependencies is very much discouraged. Exceptions will be +granted on a case-by-case basis, and only if deemed absolutely necessary. + +### Minimal global macros + +Adding new global macros is discouraged. Exceptions will only be given if +absolutely necessary. -## Coding Guidelines +### No compiler warnings -### Documentation +Contributions should not trigger compiler warnings if possible, and should not +mask compiler warnings with macros. + +### Minimal `unsafe` code + +Contributions should not contain `unsafe` blocks if at all possible. + +# Coding Guidelines + +## Documentation * Each file must have a **copyright statement**. * Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole. @@ -70,7 +164,7 @@ Within the source files, the following **code documentation** standards are expe * Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. * Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. -### Factoring +## Factoring * **Public or exported struct, enum, and trait definitions go into the `mod.rs` file**. 
Private structs, enums, and traits can go anywhere. @@ -80,13 +174,13 @@ Within the source files, the following **code documentation** standards are expe * To the greatest extent possible, **business logic and I/O should be separated**. A common pattern used in the codebase is to place the business logic into an "inner" function that does not do I/O, and handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the "inner" function. The "inner" function is often private, whereas the "outer" function is often public. -### Refactoring +## Refactoring * **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. * Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. -### Databases +## Databases * If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. @@ -98,7 +192,7 @@ Within the source files, the following **code documentation** standards are expe * If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. 
This genesis spin-up will be tested again before the next node release is made. -### Data Input +## Data Input * **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. @@ -112,23 +206,23 @@ Within the source files, the following **code documentation** standards are expe * **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). -### Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity +## Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. -### Changes to the Peer Network +## Changes to the Peer Network Any changes to the peer networking code **must be run in production before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. Changes to the peer network should be deployed incrementally and tested by multiple ecosystem entities when possible to verify that they function properly in a production setting. 
-### Performance Improvements +## Performance Improvements Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. -### Error Handling +## Error Handling * **Each subsystem must have its own `Error` type.** Error types of aggregate subsystems are encouraged to both wrap their constituent subsystems' `Error` types in their own `Error` types, as well as provide conversions from them via a `From` trait implementation. @@ -138,7 +232,7 @@ Any PRs that claim to improve performance **must ship with reproducible benchmar * If a runtime panic is desired, it **must have an appropriate error message**. -### Logging +## Logging * Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. @@ -154,7 +248,7 @@ Any PRs that claim to improve performance **must ship with reproducible benchmar * Use `warn!()` or `error!()` only when there really is a problem. -### Consensus-Critical Code +## Consensus-Critical Code A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. @@ -172,7 +266,7 @@ A non-exhaustive list of examples of consensus-critical changes include: * Changing the cost of a Clarity function * Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. -### Testing +## Testing * **Unit tests should focus on the business logic with mocked data**. 
To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. @@ -185,3 +279,235 @@ A non-exhaustive list of examples of consensus-critical changes include: * **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. * Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. + +PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel +(which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. + +A test should be marked `#[ignore]` if: + + 1. It does not _always_ pass `cargo test` in a vanilla environment + (i.e., it does not need to run with `--test-threads 1`). + + 2. Or, it runs for over a minute via a normal `cargo test` execution + (the `cargo test` command will warn if this is not the case). + + + +## Formatting + +This repository uses the default rustfmt formatting style. PRs will be checked against `rustfmt` and will _fail_ if not +properly formatted. + +You can check the formatting locally via: + +```bash +cargo fmt --all -- --check +``` + +You can automatically reformat your commit via: + +```bash +cargo fmt --all +``` + +## Comments +Comments are very important for the readability and correctness of the codebase. The purpose of comments is: + +* Allow readers to understand the roles of components and functions without having to check how they are used. +* Allow readers to check the correctness of the code against the comments. +* Allow readers to follow tests. 
+ +In the limit, if there are no comments, the problems that arise are: + +* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. +* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. +* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. + +### Comment Formatting + +Comments are to be formatted in typical `rust` style, specifically: + +- Use markdown to format comments. + +- Use the triple forward slash "///" for modules, structs, enums, traits and functions. Use double forward slash "//" for comments on individual lines of code. + +- Start with a high-level description of the function, adding more sentences with details if necessary. + +- When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: + + ``` + # Errors + * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. + ``` + +### Content of Comments + + +#### Component Comments + +Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall +purpose of that component is. This is usually a concept, and not a formal contract. Include anything that is not obvious about this component. + +**Example:** + +```rust +/// The `ReadOnlyChecker` analyzes a contract to determine whether +/// there are any violations of read-only declarations. By a "violation" +/// we mean a function that is marked as "read only" but which tries +/// to modify chainstate. 
+pub struct ReadOnlyChecker<'a, 'b> { +``` + +This comment is considered positive because it explains the concept behind the class at a glance, so that the reader has some idea about what the methods will achieve, without reading each method declaration and comment. It also defines some terms that can be used in the comments on the method names. + +#### Function Comments + +The comments on a function should explain what the function does, without having to read it. Wherever practical, it should specify the contract of a function, such that a bug in the logic could be discovered by a discrepancy between contract and implementation, or such that a test could be written with only access to the function comment. + +Without being unnecessarily verbose, explain how the output is calculated +from the inputs. Explain the side effects. Explain any restrictions on the inputs. Explain failure +conditions, including when the function will panic, return an error +or return an empty value. + +**Example:** + +```rust +/// A contract that does not violate its read-only declarations is called +/// *read-only correct*. +impl<'a, 'b> ReadOnlyChecker<'a, 'b> { + /// Checks each top-level expression in `contract_analysis.expressions` + /// for read-only correctness. + /// + /// Returns successfully iff this function is read-only correct. + /// + /// # Errors + /// + /// - Returns CheckErrors::WriteAttemptedInReadOnly if there is a read-only + /// violation, i.e. if some function marked read-only attempts to modify + /// the chainstate. + pub fn run(&mut self, contract_analysis: &ContractAnalysis) -> CheckResult<()> +``` + +This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. 
+
+#### Comments on Implementations of Virtual Methods
+
+Note that, if a function implements a virtual function on an interface, the comments should not
+repeat what was specified on the interface declaration. The comment should only add information specific to that implementation.
+
+#### Data Member Comments
+
+Each data member in a struct should have a comment describing what that member
+is, and what it is used for. Such comments are usually brief but should
+clear up any ambiguity that might result from having only the variable
+name and type.
+
+**Example:**
+
+```rust
+pub struct ReadOnlyChecker<'a, 'b> {
+    /// Mapping from function name to a boolean indicating whether
+    /// the function with that name is read-only.
+    /// This map contains all functions in the contract analyzed.
+    defined_functions: HashMap<ClarityName, bool>,
+```
+
+This comment is considered positive because it clarifies questions users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone.
+
+#### Test Comments
+
+Each test should have enough comments to help an unfamiliar reader understand:
+
+1. what is conceptually being tested
+1. why a given answer is expected
+
+Sometimes this can be obvious without many comments, perhaps from the context,
+or because the test is very simple. Often though, comments are necessary.
+
+**Example:**
+
+```rust
+#[test]
+#[ignore]
+fn transaction_validation_integration_test() {
+    /// The purpose of this test is to check if the mempool admission checks
+    /// for the post tx endpoint are working as expected wrt the optional
+    /// `mempool_admission_check` query parameter.
+    ///
+    /// In this test, we are manually creating a microblock as well as
+    /// reloading the unconfirmed state of the chainstate, instead of relying
+    /// on `next_block_and_wait` to generate microblocks.
We do this because + /// the unconfirmed state is not automatically being initialized + /// on the node, so attempting to validate any transactions against the + /// expected unconfirmed state fails. +``` + +This comment is considered positive because it explains the purpose of the test (checking the case of an optional parameter), it also guides the reader to understand the low-level details about why a microblock is created manually. + +### How Much to Comment + +Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. + +Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. + +### Don't Restate Names in Comments + +The contracts of functions should be implemented precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: + +* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** +* **the author should only state information that is new** + +So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! + +**Bad Example** + +``` +/// Appends a transaction to a block. 
+fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()>
+```
+
+This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error.
+
+**Good Example**
+
+```
+/// # Errors
+///
+/// - BlockTooBigError: Is returned if adding `transaction` to `block` results
+/// in a block size bigger than MAX_BLOCK_SIZE.
+fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()>
+```
+
+This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration.
+
+### Do's and Don'ts of Comments
+
+*Don't* over-comment by documenting things that are clear from the context. E.g.:
+
+- Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`.
+- Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment.
+- Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking.
+
+*Do* document things that are not clear, e.g.:
+
+- For a function called `process_block`, explain what it means to "process" a block.
+- For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both.
+- For a function called `run`, explain the steps involved in "running".
+- For a function that takes arguments `peer1` and `peer2`, explain the difference between the two.
+- For a function that takes an argument `height`, either explain in the comment what this is the *height of*. Alternatively, expand the variable name to remove the ambiguity.
+- For a test, document what it is meant to test, and why the expected answers are, in fact, expected.
+
+### Changing Code Instead of Comments
+
+Keep in mind that better variable names can reduce the need for comments, e.g.:
+
+* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height
+* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to explain that the inputs are microblocks
+* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment
+
+# Licensing and contributor license agreement
+
+`stacks-blockchain` is released under the terms of the GPL version 3. Contributions
+that are not licensed under compatible terms will be rejected. Moreover,
+contributions will not be accepted unless _all_ authors accept the project's
+contributor license agreement.
diff --git a/docs/contributing.md b/docs/contributing.md
deleted file mode 100644
index 40ad17c844..0000000000
--- a/docs/contributing.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing
-
-## Tests and Coverage
-
-PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel
-(which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`.
-If you add `#[ignore]` tests, you should add your branch to the filters for the `all_tests` job in our circle.yml
-(or if you are working on net code or marf code, your branch should be named such that it matches the existing
-filters there).
-
-A test should be marked `#[ignore]` if:
-
-1. It does not _always_ pass `cargo test` in a vanilla environment (i.e., it does not need to run with `--test-threads 1`).
-2. Or, it runs for over a minute via a normal `cargo test` execution (the `cargo test` command will warn if this is not the case).
-
-## Formatting
-
-This repository uses the default rustfmt formatting style.
PRs will be checked against `rustfmt` and will _fail_ if not -properly formatted. - -You can check the formatting locally via: - -```bash -cargo fmt --all -- --check -``` - -You can automatically reformat your commit via: - -```bash -cargo fmt --all -``` From 73f55526e6142d982926d9331364df11ee2b2c13 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 22 Mar 2023 10:01:35 -0500 Subject: [PATCH 035/158] docs: move docs/CONTRIBUTING.md to CONTRIBUTING.md --- docs/CONTRIBUTING.md => CONTRIBUTING.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/CONTRIBUTING.md => CONTRIBUTING.md (100%) diff --git a/docs/CONTRIBUTING.md b/CONTRIBUTING.md similarity index 100% rename from docs/CONTRIBUTING.md rename to CONTRIBUTING.md From 2b5cda5b6320f3ff94756790967ed8ed5cfd9563 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 28 Mar 2023 13:42:47 -0500 Subject: [PATCH 036/158] address PR review feedback --- CONTRIBUTING.md | 44 +++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2d1c4f504e..8721444007 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,8 +62,6 @@ Therefore, making changes to the codebase is necessarily a review-intensive proc A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. -This document describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. 
- This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. ## Reviewer Expectations @@ -74,9 +72,9 @@ Reviewers should **complete the review in one round**. The reviewer should prov Reviewers should make use of Github's "pending comments" feature. This ensures that the review is "atomic": when the reviewer submits the review, all the comments are published at once. -Reviewers should aim to **perform a reviewer in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. +Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. -Code reviews should be timely. A PR review should begin no more than **2 business days** after the PR is submitted. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the submitter. If a review cannot be begun within 2 business days, then the reviewers should **tell the submitter when they can begin**. This gives the reviewer the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. 
+Code reviews should be timely. A PR review should begin no more than **2 business days** after the PR is submitted. This applies to each reviewer: i.e., we expect all reviewers to respond within two days. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the submitter. If a review cannot be begun within 2 business days, then the reviewers should **tell the submitter when they can begin**. This gives the reviewer the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. @@ -155,7 +153,7 @@ Contributions should not contain `unsafe` blocks if at all possible. ## Documentation * Each file must have a **copyright statement**. -* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole. +* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). * Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. Within the source files, the following **code documentation** standards are expected: @@ -166,13 +164,17 @@ Within the source files, the following **code documentation** standards are expe ## Factoring -* **Public or exported struct, enum, and trait definitions go into the `mod.rs` file**. Private structs, enums, and traits can go anywhere. - * **Each non-`mod.rs` file implements at most one subsystem**. It may include multiple struct implementations and trait implementations. 
The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. * Directories represent collections of related but distinct subsystems. -* To the greatest extent possible, **business logic and I/O should be separated**. A common pattern used in the codebase is to place the business logic into an "inner" function that does not do I/O, and handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the "inner" function. The "inner" function is often private, whereas the "outer" function is often public. +* To the greatest extent possible, **business logic and I/O should be + separated**. A common pattern used in the codebase is to place the + business logic into an "inner" function that does not do I/O, and + handle I/O reads and writes in an "outer" function. The "outer" + function only does the needful I/O and passes the data into the + "inner" function. The "inner" function is often private, whereas + the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). ## Refactoring @@ -214,17 +216,25 @@ Changes that touch any of these four code paths must be treated with the utmost ## Changes to the Peer Network -Any changes to the peer networking code **must be run in production before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. +Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. 
-Changes to the peer network should be deployed incrementally and tested by multiple ecosystem entities when possible to verify that they function properly in a production setting. +Changes to the peer network should be deployed incrementally and tested by multiple parties when possible to verify that they function properly in a production setting. ## Performance Improvements Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. +For an example, see [PR #3075](https://github.com/stacks-network/stacks-blockchain/pull/3075). + ## Error Handling -* **Each subsystem must have its own `Error` type.** Error types of aggregate subsystems are encouraged to both wrap their constituent subsystems' `Error` types in their own `Error` types, as well as provide conversions from them via a `From` trait implementation. +* **Results must use `Error` types**. Fallible functions in the +codebase must use `Error` types in their `Result`s. If a new module's +errors are sufficiently different from existing `Error` types in the +codebaes, the new module must define a new `Error` type. Errors that +are caused by other `Error` types should be wrapped in a variant of +the new `Error` type. You should provide conversions via a `From` +trait implementation. * Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. @@ -238,7 +248,7 @@ Any PRs that claim to improve performance **must ship with reproducible benchmar * **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). -* Use **structured logging** whenever you find yourself logging multiple data with a format string. +* Use **structured logging** to include dynamic data in your log entry. 
For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. * Use `trace!()` and `test_debug!()` liberally. It only runs in tests. @@ -301,13 +311,13 @@ properly formatted. You can check the formatting locally via: ```bash -cargo fmt --all -- --check +cargo fmt --all -- --check --config group_imports=StdExternalCrate ``` You can automatically reformat your commit via: ```bash -cargo fmt --all +cargo fmt --all -- --config group_imports=StdExternalCrate ``` ## Comments @@ -335,7 +345,7 @@ Comments are to be formatted in typical `rust` style, specifically: - When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: - ``` + ```rust # Errors * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. ``` @@ -461,7 +471,7 @@ So, if a function and its variables have very descriptive names, then there may **Bad Example** -``` +```rust /// Appends a transaction to a block. fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> ``` @@ -470,7 +480,7 @@ This is considered bad because the function name already says "append transactio **Good Example** -``` +```rust /// # Errors /// /// - BlockTooBigError: Is returned if adding `transaction` to `block` results From 6eb9d0141c6ee288b715f0051bda5c95af574da1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 28 Mar 2023 14:09:02 -0500 Subject: [PATCH 037/158] add resource bound example --- CONTRIBUTING.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8721444007..2ecea5a7c8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -202,7 +202,13 @@ Within the source files, the following **code documentation** standards are expe * **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. 
Any inputs that exceed this size _must be discarded with as little processing as possible_. -* **All input deserialization is resource-bound.** Every piece of code that ingests data must impose a maximum amount of RAM and CPU required to decode it into a structured representation. If the data does not decode with the allotted resources, then no further processing may be done and the data is discarded. +* **All input deserialization is resource-bound.** Every piece of code + that ingests data must impose a maximum amount of RAM and CPU + required to decode it into a structured representation. If the data + does not decode with the allotted resources, then no further + processing may be done and the data is discarded. For an example, see + how the parsing functions in the http module use `BoundReader` and + `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). * **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. From c7d21154c7c212f3a18086078a3269f0e61dc59f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 28 Mar 2023 15:01:38 -0500 Subject: [PATCH 038/158] add more content from GH reviews --- CONTRIBUTING.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2ecea5a7c8..1fb91d3fb6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,6 +29,40 @@ This project and everyone participating in it is governed by this [Code of Condu - For consensus breaking changes, branch off of the `next` branch. - For hotfixes, branch off of `master`. 
+If you have commit access, use a branch in this repository. If you do +not, then you must use a github fork of the repository. + +### Branch naming + +Branch names should use a prefix that conveys the overall goal of the branch: + +- `feat/some-fancy-new-thing` for new features +- `fix/some-broken-thing` for hot fixes and bug fixes +- `docs/something-needs-a-comment` for documentation +- `ci/build-changes` for continuous-integration changes +- `test/more-coverage` for branches that only add more tests +- `refactor/formatting-fix` for refactors + +### Merging PRs from Forks + +PRs from forks or opened by contributors without commit access require +some special handling for merging. Any such PR, after being reviewed, +must get assigned to a contributor with commit access. This merge-owner +is responsible for: + +1. Creating a new branch in this repository based on the base branch + for the PR. +2. Retargeting the PR toward the new branch. +3. Merging the PR into the new branch. +4. Opening a new PR from `new_branch -> original_base` +5. Tagging reviewers for re-approval. +6. Merging the new PR. + +For an example of this process, see PRs +[#3598](https://github.com/stacks-network/stacks-blockchain/pull/3598) and +[#3626](https://github.com/stacks-network/stacks-blockchain/pull/3626). + + ### Documentation Updates - Any major changes should be added to the [CHANGELOG](CHANGELOG.md). @@ -50,6 +84,13 @@ The general format is as follows: ``` Common types include build, ci, docs, fix, feat, test, refactor, etc. +When a commit is addressing or related to a particular Github issue, it +should reference the issue in the commit message. For example: + +``` +fix: incorporate unlocks in mempool admitter, #3623 +``` + # Creating and Reviewing PRs This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). 
This is a living document -- developers can and should document their own additional guidelines here. From 87fd39163dbf8cc62dc973678d1cb43808bd6ce6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 Mar 2023 08:06:39 -0500 Subject: [PATCH 039/158] more PR feedback --- CONTRIBUTING.md | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1fb91d3fb6..d388891f44 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -115,7 +115,15 @@ Reviewers should make use of Github's "pending comments" feature. This ensures t Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. -Code reviews should be timely. A PR review should begin no more than **2 business days** after the PR is submitted. This applies to each reviewer: i.e., we expect all reviewers to respond within two days. The `develop` and `next` branches in particular often change quickly, so letting a PR languish only creates more merge work for the submitter. If a review cannot be begun within 2 business days, then the reviewers should **tell the submitter when they can begin**. This gives the reviewer the opportunity to keep working on the PR (if needed) or even withdraw and resubmit it. +Code reviews should be timely. Reviewers should start no more than +**2 business days** after reviewers are assigned. This applies to each +reviewer: i.e., we expect all reviewers to respond within two days. 
+The `develop` and `next` branches in particular often change quickly, +so letting a PR languish only creates more merge work for the +submitter. If a review cannot be started within this timeframe, then +the reviewers should **tell the submitter when they can begin**. This +gives the reviewer the opportunity to keep working on the PR (if +needed) or even withdraw and resubmit it. Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. @@ -129,7 +137,13 @@ The size and scale of a PR depend on the reviewers' abilities to process the cha A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. -Providing detailed answers to reviewer questions is often necessary as a submitter. In order to make this information accessible even after a PR has merged, submitters should strive to incorporate any clarifications into code comments. +Providing detailed answers to reviewer questions is often necessary as a submitter. In order to make this information accessible even after a PR has merged, **submitters should strive to incorporate any clarifications into code comments**. + +**Selecting Reviewers**. PR submitters may tag reviewers that they +think are relevant to the code changes in the PR (or using the +reviewer suggestions provided by Github). If a PR is submitted without +assigned reviewers, then reviewers will be assigned at least by the next +Weekly Blockchain Engineering Meeting (information can be found in Discord). ## Submission Checklist @@ -486,17 +500,17 @@ or because the test is very simple. Often though, comments are necessary. ```rust #[test] #[ignore] +/// The purpose of this test is to check if the mempool admission checks +/// for the post tx endpoint are working as expected wrt the optional +/// `mempool_admission_check` query parameter. 
+/// +/// In this test, we are manually creating a microblock as well as +/// reloading the unconfirmed state of the chainstate, instead of relying +/// on `next_block_and_wait` to generate microblocks. We do this because +/// the unconfirmed state is not automatically being initialized +/// on the node, so attempting to validate any transactions against the +/// expected unconfirmed state fails. fn transaction_validation_integration_test() { - /// The purpose of this test is to check if the mempool admission checks - /// for the post tx endpoint are working as expected wrt the optional - /// `mempool_admission_check` query parameter. - /// - /// In this test, we are manually creating a microblock as well as - /// reloading the unconfirmed state of the chainstate, instead of relying - /// on `next_block_and_wait` to generate microblocks. We do this because - /// the unconfirmed state is not automatically being initialized - /// on the node, so attempting to validate any transactions against the - /// expected unconfirmed state fails. ``` This comment is considered positive because it explains the purpose of the test (checking the case of an optional parameter), it also guides the reader to understand the low-level details about why a microblock is created manually. From 2015617b6ca9bc1678fce96bde1ce1b072305007 Mon Sep 17 00:00:00 2001 From: Igor Sylvester <1248500+igorsyl@users.noreply.github.com> Date: Thu, 6 Apr 2023 09:39:34 -0500 Subject: [PATCH 040/158] Add AI code policy --- CONTRIBUTING.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d388891f44..cdde42a845 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -582,3 +582,6 @@ Keep in mind that better variable names can reduce the need for comments, e.g.: that are not licensed under compatible terms will be rejected. Moreover, contributions will not be accepted unless _all_ authors accept the project's contributor license agreement. 
+ +# Use of AI-code Generation +The Stacks Foundation has a very script policy of no AI-generated code due to uncertainly about licensing issues. From d446737b2aae925f4d01f886f9e9681f4d4e8a9d Mon Sep 17 00:00:00 2001 From: Igor Sylvester <1248500+igorsyl@users.noreply.github.com> Date: Thu, 6 Apr 2023 09:42:29 -0500 Subject: [PATCH 041/158] Update CONTRIBUTING.md --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cdde42a845..8a39f48859 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -584,4 +584,4 @@ contributions will not be accepted unless _all_ authors accept the project's contributor license agreement. # Use of AI-code Generation -The Stacks Foundation has a very script policy of no AI-generated code due to uncertainly about licensing issues. +The Stacks Foundation has a very strict policy of not accepting AI-generated code PRs due to uncertainly about licensing issues. From fa0eeaef5f9198c70522d0e1af08af449deab95d Mon Sep 17 00:00:00 2001 From: Igor Sylvester <1248500+igorsyl@users.noreply.github.com> Date: Thu, 6 Apr 2023 10:16:12 -0500 Subject: [PATCH 042/158] Update CONTRIBUTING.md Co-authored-by: Aaron Blankstein --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8a39f48859..9c121b9dbf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -583,5 +583,5 @@ that are not licensed under compatible terms will be rejected. Moreover, contributions will not be accepted unless _all_ authors accept the project's contributor license agreement. -# Use of AI-code Generation +## Use of AI-code Generation The Stacks Foundation has a very strict policy of not accepting AI-generated code PRs due to uncertainly about licensing issues. 
From ebf6cf22aab880df6e129d216d446e80ba2f5049 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 12 Apr 2023 07:42:48 -0500 Subject: [PATCH 043/158] fix: neon_node unprocessed_block behavior --- CHANGELOG.md | 9 +++++++- clarity/src/vm/database/clarity_db.rs | 9 -------- src/chainstate/stacks/db/blocks.rs | 26 +++++++++++++++-------- src/chainstate/stacks/mod.rs | 2 +- src/clarity_vm/database/mod.rs | 2 +- testnet/stacks-node/src/config.rs | 6 ++++++ testnet/stacks-node/src/neon_node.rs | 30 ++++++++++++++++++++------- 7 files changed, 55 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 872497c801..f1edd70e25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,18 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.1.0.0.3] + +This is a high-priority hotfix release to address a bug in the +stacks-node miner logic which could impact miner availability. This +release's chainstate directory is compatible with chainstate +directories from 2.1.0.0.2. + ## [2.1.0.0.2] This software update is a hotfix to resolve improper unlock handling in mempool admission. This release's chainstate directory is -compatible with chainstate directories from 2.1.0.0.2. +compatible with chainstate directories from 2.1.0.0.1. 
### Fixed diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 1eca4b231c..7d21399006 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -1833,15 +1833,6 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_burn_block_height(sortition_id) } - pub fn get_burn_header_hash( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option { - self.burn_state_db - .get_burn_header_hash(height, sortition_id) - } - /// This function obtains the stacks epoch version, which is based on the burn block height. /// Valid epochs include stacks 1.0, 2.0, 2.05, and so on. pub fn get_stacks_epoch(&self, height: u32) -> Option { diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index f7acd84371..3b7d63bc56 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -3704,13 +3704,18 @@ impl StacksChainState { Ok(count - to_write) } - /// Check whether or not there exists a Stacks block at or higher than a given height that is - /// unprocessed. This is used by miners to determine whether or not the block-commit they're - /// about to send is about to be invalidated - pub fn has_higher_unprocessed_blocks(conn: &DBConn, height: u64) -> Result { + /// Check whether or not there exists a Stacks block at or higher + /// than a given height that is unprocessed and relatively + /// new. This is used by miners to determine whether or not the + /// block-commit they're about to send is about to be invalidated. 
+ pub fn has_higher_unprocessed_blocks( + conn: &DBConn, + height: u64, + deadline: u64, + ) -> Result { let sql = - "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1 AND arrival_time >= ?2"; + let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(deadline)?]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -3720,10 +3725,13 @@ impl StacksChainState { /// Get the metadata of the highest unprocessed block. /// The block data will not be returned - pub fn get_highest_unprocessed_block(conn: &DBConn) -> Result, Error> { + pub fn get_highest_unprocessed_block( + conn: &DBConn, + deadline: u64, + ) -> Result, Error> { let sql = - "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 ORDER BY height DESC LIMIT 1"; - let res = query_row(conn, sql, NO_PARAMS)?; + "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND arrival_time >= ?1 ORDER BY height DESC LIMIT 1"; + let res = query_row(conn, sql, &[u64_to_sql(deadline)?])?; Ok(res) } diff --git a/src/chainstate/stacks/mod.rs b/src/chainstate/stacks/mod.rs index 934b92ddb0..e10460930e 100644 --- a/src/chainstate/stacks/mod.rs +++ b/src/chainstate/stacks/mod.rs @@ -84,7 +84,7 @@ pub use stacks_common::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; -pub const STACKS_BLOCK_VERSION: u8 = 4; +pub const STACKS_BLOCK_VERSION: u8 = 5; pub const STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE: u8 = 1; pub const MAX_BLOCK_LEN: u32 = 2 * 1024 * 1024; diff --git a/src/clarity_vm/database/mod.rs b/src/clarity_vm/database/mod.rs index 599a53b385..67994c3609 100644 --- a/src/clarity_vm/database/mod.rs +++ b/src/clarity_vm/database/mod.rs @@ -534,7 +534,7 @@ impl BurnStateDB for SortitionDBConn<'_> { Some(height) => height, }; - if height >= current_height { + if height > 
current_height { return None; } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 6b3f6df012..8bb7e167c1 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -869,6 +869,9 @@ impl Config { candidate_retry_cache_size: miner .candidate_retry_cache_size .unwrap_or(miner_default_config.candidate_retry_cache_size), + unprocessed_block_deadline_secs: miner + .unprocessed_block_deadline_secs + .unwrap_or(miner_default_config.unprocessed_block_deadline_secs), }, None => miner_default_config, }; @@ -1846,6 +1849,7 @@ pub struct MinerConfig { pub wait_for_block_download: bool, pub nonce_cache_size: u64, pub candidate_retry_cache_size: u64, + pub unprocessed_block_deadline_secs: u64, } impl MinerConfig { @@ -1861,6 +1865,7 @@ impl MinerConfig { wait_for_block_download: true, nonce_cache_size: 10_000, candidate_retry_cache_size: 10_000, + unprocessed_block_deadline_secs: 30, } } } @@ -1976,6 +1981,7 @@ pub struct MinerConfigFile { pub segwit: Option, pub nonce_cache_size: Option, pub candidate_retry_cache_size: Option, + pub unprocessed_block_deadline_secs: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 87d18dc4d0..90f10c92db 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1760,6 +1760,7 @@ impl BlockMinerThread { burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &StacksChainState, + unprocessed_block_deadline: u64, ) -> bool { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: could not query canonical sortition DB tip"); @@ -1768,13 +1769,21 @@ impl BlockMinerThread { .get_stacks_chain_tip(sortdb) .expect("FATAL: could not query canonical Stacks chain tip") { - let has_unprocessed = - StacksChainState::has_higher_unprocessed_blocks(chainstate.db(), stacks_tip.height) - .expect("FATAL: failed to query 
staging blocks"); + // if a block hasn't been processed within some deadline seconds of receipt, don't block + // mining + let process_deadline = get_epoch_time_secs() - unprocessed_block_deadline; + let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( + chainstate.db(), + stacks_tip.height, + process_deadline, + ) + .expect("FATAL: failed to query staging blocks"); if has_unprocessed { - let highest_unprocessed_opt = - StacksChainState::get_highest_unprocessed_block(chainstate.db()) - .expect("FATAL: failed to query staging blocks"); + let highest_unprocessed_opt = StacksChainState::get_highest_unprocessed_block( + chainstate.db(), + process_deadline, + ) + .expect("FATAL: failed to query staging blocks"); if let Some(highest_unprocessed) = highest_unprocessed_opt { let highest_unprocessed_block_sn_opt = @@ -2011,8 +2020,12 @@ impl BlockMinerThread { .expect("FATAL: mutex poisoned") .is_blocked(); - let has_unprocessed = - Self::unprocessed_blocks_prevent_mining(&self.burnchain, &burn_db, &chain_state); + let has_unprocessed = Self::unprocessed_blocks_prevent_mining( + &self.burnchain, + &burn_db, + &chain_state, + self.config.miner.unprocessed_block_deadline_secs, + ); if stacks_tip.anchored_block_hash != anchored_block.header.parent_block || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash || cur_burn_chain_tip.burn_header_hash != self.burn_block.burn_header_hash @@ -2977,6 +2990,7 @@ impl RelayerThread { &self.burnchain, self.sortdb_ref(), self.chainstate_ref(), + self.config.miner.unprocessed_block_deadline_secs, ); if has_unprocessed { debug!( From d4c73d1b599a9197c3c5a319b174c4494f7b27e0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 19 Apr 2023 21:23:28 -0500 Subject: [PATCH 044/158] add epoch-2.2 variant --- clarity/src/vm/analysis/mod.rs | 4 +- clarity/src/vm/analysis/type_checker/mod.rs | 6 +- clarity/src/vm/costs/mod.rs | 2 +- clarity/src/vm/functions/mod.rs | 2 + clarity/src/vm/types/signatures.rs | 4 
+- clarity/src/vm/version.rs | 1 + src/chainstate/burn/db/sortdb.rs | 1 + .../burn/operations/leader_block_commit.rs | 4 +- src/chainstate/coordinator/mod.rs | 2 +- src/chainstate/stacks/db/blocks.rs | 34 +++++++-- src/chainstate/stacks/db/mod.rs | 1 + src/chainstate/stacks/db/transactions.rs | 1 + src/core/mod.rs | 71 +++++++++++++++++++ src/cost_estimates/pessimistic.rs | 2 + stacks-common/src/types/mod.rs | 5 +- 15 files changed, 124 insertions(+), 16 deletions(-) diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index cbe408e689..443fbc1954 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -137,7 +137,9 @@ pub fn run_analysis( StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db) } - StacksEpochId::Epoch21 => TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) + } StacksEpochId::Epoch10 => unreachable!("Epoch 1.0 is not a valid epoch for analysis"), }?; TraitChecker::run_pass(&epoch, &mut contract_analysis, db)?; diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 5a5be7a23b..8286cbdc87 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -50,7 +50,9 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_2_05(accounting, args) } - StacksEpochId::Epoch21 => self.check_args_2_1(accounting, args, clarity_version), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + self.check_args_2_1(accounting, args, clarity_version) + } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), } } @@ -66,7 +68,7 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_by_allowing_trait_cast_2_05(db, func_args) } - 
StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 9d89249e6a..e8998d0759 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -699,7 +699,7 @@ impl LimitedCostTracker { } StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), - StacksEpochId::Epoch21 => COSTS_3_NAME.to_string(), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => COSTS_3_NAME.to_string(), } } } diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index ad1195aa54..21e6fcdfb2 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -56,6 +56,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch2_05 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.1. StacksEpochId::Epoch21 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.2. 
+ StacksEpochId::Epoch22 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 3dd44e4305..446a2cb5f2 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -529,7 +529,7 @@ impl TypeSignature { pub fn admits_type(&self, epoch: &StacksEpochId, other: &TypeSignature) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => self.admits_type_v2_0(&other), - StacksEpochId::Epoch21 => self.admits_type_v2_1(other), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } } @@ -1045,7 +1045,7 @@ impl TypeSignature { ) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => Self::least_supertype_v2_0(a, b), - StacksEpochId::Epoch21 => Self::least_supertype_v2_1(a, b), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 2e7a2e9638..3b667fd507 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -31,6 +31,7 @@ impl ClarityVersion { StacksEpochId::Epoch20 => ClarityVersion::Clarity1, StacksEpochId::Epoch2_05 => ClarityVersion::Clarity1, StacksEpochId::Epoch21 => ClarityVersion::Clarity2, + StacksEpochId::Epoch22 => ClarityVersion::Clarity2, } } } diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 6a4ec37678..2798289dfc 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -2847,6 +2847,7 @@ impl SortitionDB { } StacksEpochId::Epoch2_05 => version == "2" || version == "3" || version == "4", StacksEpochId::Epoch21 => version == "3" || version == "4", + StacksEpochId::Epoch22 => version == "4", } } diff --git 
a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 22c369c1aa..a45c1d1ca3 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -38,6 +38,7 @@ use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::codec::{write_next, Error as codec_error, StacksMessageCodec}; +use crate::core::STACKS_EPOCH_2_2_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER}; use crate::net::Error as net_error; @@ -753,6 +754,7 @@ impl LeaderBlockCommitOp { } StacksEpochId::Epoch2_05 => self.check_epoch_commit_marker(STACKS_EPOCH_2_05_MARKER), StacksEpochId::Epoch21 => self.check_epoch_commit_marker(STACKS_EPOCH_2_1_MARKER), + StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), } } @@ -767,7 +769,7 @@ impl LeaderBlockCommitOp { ) -> Result { let tx_tip = tx.context.chain_tip.clone(); let intended_sortition = match epoch_id { - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 322efe133f..f788d71783 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -2987,7 +2987,7 @@ impl< return Ok(Some(pox_anchor)); } } - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { // 2.1 behavior: the anchor block must also be the // heaviest-confirmed anchor block by BTC weight, and the highest // such anchor block if there are multiple contenders. 
diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 3b7d63bc56..f6c7e49ac6 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -4886,21 +4886,41 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); applied = true; } + StacksEpochId::Epoch22 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } }, - StacksEpochId::Epoch2_05 => { + StacksEpochId::Epoch2_05 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch21 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + applied = true; + } + StacksEpochId::Epoch22 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } + _ => { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch21 => { assert_eq!( sortition_epoch.epoch_id, - StacksEpochId::Epoch21, - "Should only transition from Epoch2_05 to Epoch21" + StacksEpochId::Epoch22, + "Should only transition from Epoch21 to Epoch22" ); - receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); applied = true; } - StacksEpochId::Epoch21 => { - panic!("No defined transition from Epoch21 forward") + StacksEpochId::Epoch22 => { + panic!("No defined transition from Epoch22 forward") } } } @@ -5487,7 +5507,7 @@ impl StacksChainState { // The DelegateStx bitcoin wire format does not exist before Epoch 2.1. 
Ok((stack_ops, transfer_ops, vec![])) } - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index 71853f22e2..0578691ed5 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -223,6 +223,7 @@ impl DBConfig { self.version == "2" || self.version == "3" || self.version == "4" } StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch22 => self.version == "4", } } } diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index 66bca7d5f7..fdff595ecd 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -8347,6 +8347,7 @@ pub mod test { StacksEpochId::Epoch20 => self.get_stacks_epoch(0), StacksEpochId::Epoch2_05 => self.get_stacks_epoch(1), StacksEpochId::Epoch21 => self.get_stacks_epoch(2), + StacksEpochId::Epoch22 => self.get_stacks_epoch(3), } } fn get_pox_payout_addrs( diff --git a/src/core/mod.rs b/src/core/mod.rs index 678455937e..80d85d50e0 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -326,6 +326,10 @@ pub static STACKS_EPOCH_2_05_MARKER: u8 = 0x05; /// *or greater*. pub static STACKS_EPOCH_2_1_MARKER: u8 = 0x06; +/// Stacks 2.2 epoch marker. All block-commits in 2.2 must have a memo bitfield with this value +/// *or greater*. 
+pub static STACKS_EPOCH_2_2_MARKER: u8 = 0x07; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); @@ -391,6 +395,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -553,6 +559,70 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_2_2(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + 
read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -612,6 +682,7 @@ impl StacksEpochExtension for StacksEpoch { } StacksEpochId::Epoch2_05 => StacksEpoch::unit_test_2_05(first_burnchain_height), StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1(first_burnchain_height), + StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), } } diff --git a/src/cost_estimates/pessimistic.rs b/src/cost_estimates/pessimistic.rs index 7982426b2a..4264151160 100644 --- a/src/cost_estimates/pessimistic.rs +++ b/src/cost_estimates/pessimistic.rs @@ -230,6 +230,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch20 => "", StacksEpochId::Epoch2_05 => ":2.05", StacksEpochId::Epoch21 => ":2.1", + // reuse cost estimates in Epoch22 + StacksEpochId::Epoch22 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 6e2559a5dc..f39691072f 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -72,11 +72,12 @@ pub enum StacksEpochId { Epoch20 = 0x02000, Epoch2_05 = 0x02005, Epoch21 = 0x0200a, + Epoch22 = 0x0200f, } impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch21 + StacksEpochId::Epoch22 } } @@ -87,6 +88,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch20 => write!(f, "2.0"), StacksEpochId::Epoch2_05 => write!(f, "2.05"), StacksEpochId::Epoch21 => write!(f, "2.1"), + StacksEpochId::Epoch22 => write!(f, "2.2"), } } } @@ -100,6 +102,7 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch20 as u32 => Ok(StacksEpochId::Epoch20), x if x == StacksEpochId::Epoch2_05 as u32 => Ok(StacksEpochId::Epoch2_05), x if x == StacksEpochId::Epoch21 as u32 => Ok(StacksEpochId::Epoch21), + x if x == StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), _ => Err("Invalid epoch"), } } From 
679ff445d75e30fb99de58c62d7d229405c0d3a7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Apr 2023 10:28:23 -0500 Subject: [PATCH 045/158] epoch 2.2: unlock pox-2 lockups --- clarity/src/vm/database/clarity_db.rs | 23 ++- clarity/src/vm/database/structures.rs | 104 ++++++++++-- clarity/src/vm/docs/mod.rs | 4 + clarity/src/vm/functions/assets.rs | 3 +- clarity/src/vm/test_util/mod.rs | 4 + src/burnchains/mod.rs | 19 ++- src/burnchains/tests/affirmation.rs | 156 +++++++++++++++--- src/burnchains/tests/db.rs | 52 +++++- src/chainstate/burn/db/sortdb.rs | 2 +- .../burn/operations/leader_block_commit.rs | 6 +- src/chainstate/coordinator/tests.rs | 64 +++++-- src/chainstate/stacks/boot/contract_tests.rs | 4 + src/chainstate/stacks/boot/mod.rs | 12 +- src/chainstate/stacks/boot/pox_2_tests.rs | 18 +- src/chainstate/stacks/db/blocks.rs | 25 ++- src/chainstate/stacks/db/transactions.rs | 19 ++- .../stacks/tests/block_construction.rs | 3 +- src/clarity_vm/clarity.rs | 33 ++++ src/clarity_vm/database/mod.rs | 8 + src/core/mod.rs | 8 + src/net/inv.rs | 2 + src/net/mod.rs | 1 + src/net/rpc.rs | 15 +- testnet/stacks-node/src/tests/epoch_21.rs | 15 ++ .../src/tests/neon_integrations.rs | 3 + 25 files changed, 511 insertions(+), 92 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 7d21399006..9b5e124e78 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -113,6 +113,7 @@ pub trait HeadersDB { pub trait BurnStateDB { fn get_v1_unlock_height(&self) -> u32; + fn get_v2_unlock_height(&self) -> u32; /// Returns the *burnchain block height* for the `sortition_id` is associated with. 
fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option; @@ -197,6 +198,10 @@ impl BurnStateDB for &dyn BurnStateDB { (*self).get_v1_unlock_height() } + fn get_v2_unlock_height(&self) -> u32 { + (*self).get_v2_unlock_height() + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { (*self).get_burn_block_height(sortition_id) } @@ -367,6 +372,10 @@ impl BurnStateDB for NullBurnStateDB { u32::max_value() } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } + fn get_pox_prepare_length(&self) -> u32 { panic!("NullBurnStateDB should not return PoX info"); } @@ -752,6 +761,12 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_v1_unlock_height() } + /// Return the height for PoX v1 -> v2 auto unlocks + /// from the burn state db + pub fn get_v2_unlock_height(&self) -> u32 { + self.burn_state_db.get_v2_unlock_height() + } + /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! 
/// This is the burnchain block height of the parent of the Stacks block at the current Stacks @@ -1777,8 +1792,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } @@ -1796,8 +1811,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 5fd583c9d5..40472bb350 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -324,27 +324,40 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { pub fn get_available_balance(&self) -> u128 { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .get_available_balance_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + self.balance.get_available_balance_at_burn_block( + 
self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } pub fn canonical_balance_repr(&self) -> STXBalance { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height) + .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height) .0 } pub fn has_locked_tokens(&self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .has_locked_tokens_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + self.balance.has_locked_tokens_at_burn_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } pub fn has_unlockable_tokens(&self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .has_unlockable_tokens_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + self.balance.has_unlockable_tokens_at_burn_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } pub fn can_transfer(&self, amount: u128) -> bool { @@ -552,9 +565,11 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { /// Unlock any tokens that are unlockable at the current /// burn block height, and return the amount newly unlocked fn unlock_available_tokens_if_any(&mut self) -> u128 { - let (new_balance, unlocked) = self - .balance - .canonical_repr_at_block(self.burn_block_height, self.db_ref.get_v1_unlock_height()); + let (new_balance, unlocked) = self.balance.canonical_repr_at_block( + self.burn_block_height, + self.db_ref.get_v1_unlock_height(), + self.db_ref.get_v2_unlock_height(), + ); self.balance = new_balance; unlocked } @@ -588,7 +603,7 @@ impl STXBalance { /// *while* factoring in the PoX 2 early unlock for PoX 1. 
/// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. - pub fn effective_unlock_height(&self, v1_unlock_height: u32) -> u64 { + pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32) -> u64 { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { @@ -598,7 +613,13 @@ impl STXBalance { *unlock_height } } - STXBalance::LockedPoxTwo { unlock_height, .. } => *unlock_height, + STXBalance::LockedPoxTwo { unlock_height, .. } => { + if *unlock_height >= (v2_unlock_height as u64) { + v2_unlock_height as u64 + } else { + *unlock_height + } + } } } @@ -695,8 +716,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> (STXBalance, u128) { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { ( STXBalance::Unlocked { amount: self.get_total_balance(), @@ -712,8 +738,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> u128 { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { self.get_total_balance() } else { match self { @@ -732,8 +763,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> (u128, u64) { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { (0, 0) } else { match self { @@ -781,6 +817,7 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { match self { 
STXBalance::Unlocked { .. } => false, @@ -806,7 +843,19 @@ impl STXBalance { amount_locked, unlock_height, .. - } => *amount_locked > 0 && *unlock_height > burn_block_height, + } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + // if unlockable due to Stacks 2.2 early unlock + if v2_unlock_height as u64 <= burn_block_height { + return false; + } + true + } } } @@ -814,6 +863,7 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -839,7 +889,20 @@ impl STXBalance { amount_locked, unlock_height, .. - } => *amount_locked > 0 && *unlock_height <= burn_block_height, + } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + // if unlockable due to Stacks 2.2 early unlock + if v2_unlock_height as u64 <= burn_block_height { + return true; + } + false + } } } @@ -848,7 +911,12 @@ impl STXBalance { amount: u128, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { - self.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height) >= amount + self.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) >= amount } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 0b78120e19..257a61b039 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2769,6 +2769,10 @@ mod test { u32::max_value() } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } + fn get_pox_prepare_length(&self) -> u32 { panic!("Docs db should not return PoX info") } diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 0ade85e642..b54c8b1b5f 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -236,6 +236,7 @@ 
pub fn special_stx_account( .get_stx_balance_snapshot(&principal) .canonical_balance_repr(); let v1_unlock_ht = env.global_context.database.get_v1_unlock_height(); + let v2_unlock_ht = env.global_context.database.get_v2_unlock_height(); TupleData::from_data(vec![ ( @@ -248,7 +249,7 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht) as u128), + Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht) as u128), ), ]) .map(|t| Value::Tuple(t)) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 92d8daccd9..a1987e6985 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -210,6 +210,10 @@ impl BurnStateDB for UnitTestBurnStateDB { u32::max_value() } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } + fn get_pox_prepare_length(&self) -> u32 { 1 } diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index d8be22859e..a42e2fc0f5 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -312,6 +312,8 @@ pub struct PoxConstants { /// also defines the burn height at which PoX reward sets are calculated using /// PoX v2 rather than v1 pub v1_unlock_height: u32, + /// The auto unlock height for PoX v2 lockups during Epoch 2.2 + pub v2_unlock_height: u32, _shadow: PhantomData<()>, } @@ -325,6 +327,7 @@ impl PoxConstants { sunset_start: u64, sunset_end: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> PoxConstants { assert!(anchor_threshold > (prepare_length / 2)); assert!(prepare_length < reward_cycle_length); @@ -339,13 +342,24 @@ impl PoxConstants { sunset_start, sunset_end, v1_unlock_height, + v2_unlock_height, _shadow: PhantomData, } } #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::max_value()) + PoxConstants::new( + 10, + 5, + 3, + 25, + 5, + 5000, + 10000, + 
u32::max_value(), + u32::max_value(), + ) } /// Returns the PoX contract that is "active" at the given burn block height @@ -386,6 +400,7 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, + POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, ) } @@ -399,6 +414,7 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, + POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, ) // total liquid supply is 40000000000000000 µSTX } @@ -412,6 +428,7 @@ impl PoxConstants { BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, 1_000_000, + 2_000_000, ) } diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs index 94adb4676a..46d73e9d6d 100644 --- a/src/burnchains/tests/affirmation.rs +++ b/src/burnchains/tests/affirmation.rs @@ -488,8 +488,17 @@ fn test_read_prepare_phase_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -555,8 +564,17 @@ fn test_parent_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = 
first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -647,8 +665,17 @@ fn test_filter_orphan_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -708,8 +735,17 @@ fn test_filter_missed_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -769,8 +805,17 @@ fn test_find_heaviest_block_commit() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -982,8 +1027,17 @@ fn test_find_heaviest_parent_commit_many_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 
1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1235,8 +1289,17 @@ fn test_update_pox_affirmation_maps_3_forks() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1485,8 +1548,17 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1678,8 +1750,17 @@ fn test_update_pox_affirmation_maps_absent() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2141,8 +2222,17 @@ fn test_update_pox_affirmation_maps_nothing() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - 
burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2408,8 +2498,17 @@ fn test_update_pox_affirmation_fork_2_cycles() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 2, 2, 25, 5, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2700,8 +2799,17 @@ fn test_update_pox_affirmation_fork_duel() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 2, 2, 25, 5, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index f37040fd20..2aa60e2593 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -509,8 +509,17 @@ fn test_get_commit_at() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + 
u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -624,8 +633,17 @@ fn test_get_set_check_anchor_block() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -709,8 +727,17 @@ fn test_update_block_descendancy() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -828,8 +855,17 @@ fn test_update_block_descendancy_with_fork() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::max_value(), + u32::max_value(), + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 2798289dfc..296e5f3f57 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -9464,7 +9464,7 @@ pub mod tests { 
fs::create_dir_all(path_root).unwrap(); - let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX); + let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX, u32::MAX); let mut burnchain = Burnchain::regtest(path_root); burnchain.pox_constants = pox_consts.clone(); diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index a45c1d1ca3..2e9ece5cb3 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -1769,7 +1769,7 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2302,7 +2302,7 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2992,7 +2992,7 @@ mod tests { .unwrap(); let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index aa3ee613e8..e53a4ffc08 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -516,6 +516,7 @@ pub fn get_burnchain(path: &str, pox_consts: Option) -> Burnchain u64::max_value(), u64::max_value(), u32::max_value(), + u32::max_value(), ) }); b @@ -953,6 +954,7 @@ fn missed_block_commits_2_05() { 7010, 
sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1269,6 +1271,7 @@ fn missed_block_commits_2_1() { 7010, sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1609,6 +1612,7 @@ fn late_block_commits_2_1() { 7010, sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2663,6 +2667,7 @@ fn test_pox_btc_ops() { let sunset_ht = 8000; let pox_v1_unlock_ht = u32::max_value(); + let pox_v2_unlock_ht = u32::max_value(); let pox_consts = Some(PoxConstants::new( 5, 3, @@ -2672,6 +2677,7 @@ fn test_pox_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2849,8 +2855,11 @@ fn test_pox_btc_ops() { assert_eq!(stacker_balance.amount_locked(), stacked_amt); } else { assert_eq!( - stacker_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + stacker_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), balance as u128, "No lock should be active" ); @@ -2939,6 +2948,7 @@ fn test_stx_transfer_btc_ops() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = u32::max_value(); + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 5, @@ -2949,6 +2959,7 @@ fn test_stx_transfer_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3148,26 +3159,38 @@ fn test_stx_transfer_btc_ops() { if ix > 2 { assert_eq!( - sender_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + sender_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), (balance as u128) - transfer_amt, "Transfer should have 
decremented balance" ); assert_eq!( - recipient_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + recipient_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), transfer_amt, "Recipient should have incremented balance" ); } else { assert_eq!( - sender_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + sender_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), balance as u128, ); assert_eq!( - recipient_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + recipient_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), 0, ); } @@ -3330,6 +3353,7 @@ fn test_delegate_stx_btc_ops() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3340,6 +3364,7 @@ fn test_delegate_stx_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3643,6 +3668,7 @@ fn test_initial_coinbase_reward_distributions() { 7010, sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3880,6 +3906,7 @@ fn test_epoch_switch_cost_contract_instantiation() { 10, sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4070,7 +4097,17 @@ fn test_epoch_switch_pox_contract_instantiation() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10)); + let pox_consts = Some(PoxConstants::new( + 6, + 3, + 3, + 25, + 5, + 10, + sunset_ht, + 10, + u32::max_value(), + )); let burnchain_conf = get_burnchain(path, 
pox_consts.clone()); let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); @@ -4313,6 +4350,7 @@ fn test_epoch_verify_active_pox_contract() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 6, @@ -4323,6 +4361,7 @@ fn test_epoch_verify_active_pox_contract() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4612,6 +4651,7 @@ fn test_sortition_with_sunset() { 10, sunset_ht, u32::max_value(), + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4919,6 +4959,7 @@ fn test_sortition_with_sunset_and_epoch_switch() { 10, sunset_ht, v1_unlock_ht, + u32::max_value(), )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5267,6 +5308,7 @@ fn test_pox_processable_block_in_different_pox_forks() { u64::MAX - 1, u64::MAX, u32::max_value(), + u32::max_value(), )); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); diff --git a/src/chainstate/stacks/boot/contract_tests.rs b/src/chainstate/stacks/boot/contract_tests.rs index 3d662837c8..5b565f64ae 100644 --- a/src/chainstate/stacks/boot/contract_tests.rs +++ b/src/chainstate/stacks/boot/contract_tests.rs @@ -403,6 +403,10 @@ impl BurnStateDB for TestSimBurnStateDB { u32::max_value() } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } + fn get_pox_prepare_length(&self) -> u32 { self.pox_constants.prepare_length } diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 347d9c2f2c..fcd4d5497e 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -970,7 +970,17 @@ pub mod test { #[test] fn get_reward_threshold_units() { - let test_pox_constants = PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::max_value()); + let 
test_pox_constants = PoxConstants::new( + 501, + 1, + 1, + 1, + 5, + 5000, + 10000, + u32::max_value(), + u32::max_value(), + ); // when the liquid amount = the threshold step, // the threshold should always be the step size. let liquid = POX_THRESHOLD_STEPS_USTX; diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index ab9cf9ad42..5d7b2ea82d 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1117,7 +1117,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); while get_tip(peer.sortdb.as_ref()).block_height < height_target { @@ -1142,7 +1146,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), 0); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block @@ -1151,7 +1159,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), 0); // check that the total reward cycle amounts have decremented correctly 
diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index f6c7e49ac6..512a1cf4e0 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -7166,11 +7166,12 @@ impl StacksChainState { return Err(MemPoolRejection::BadAddressVersionByte); } - let (block_height, v1_unlock_height) = - clarity_connection.with_clarity_db_readonly(|ref mut db| { + let (block_height, v1_unlock_height, v2_unlock_height) = clarity_connection + .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height() as u64, db.get_v1_unlock_height(), + db.get_v2_unlock_height(), ) }); @@ -7179,6 +7180,7 @@ impl StacksChainState { fee as u128, block_height, v1_unlock_height, + v2_unlock_height, ) { match &tx.payload { TransactionPayload::TokenTransfer(..) => { @@ -7187,9 +7189,11 @@ impl StacksChainState { _ => { return Err(MemPoolRejection::NotEnoughFunds( fee as u128, - payer - .stx_balance - .get_available_balance_at_burn_block(block_height, v1_unlock_height), + payer.stx_balance.get_available_balance_at_burn_block( + block_height, + v1_unlock_height, + v2_unlock_height, + ), )); } } @@ -7212,12 +7216,15 @@ impl StacksChainState { total_spent, block_height, v1_unlock_height, + v2_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( total_spent, - origin - .stx_balance - .get_available_balance_at_burn_block(block_height, v1_unlock_height), + origin.stx_balance.get_available_balance_at_burn_block( + block_height, + v1_unlock_height, + v2_unlock_height, + ), )); } @@ -7227,12 +7234,14 @@ impl StacksChainState { fee as u128, block_height, v1_unlock_height, + v2_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( fee as u128, payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, + v2_unlock_height, ), )); } diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index fdff595ecd..17868ddaab 100644 --- 
a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -425,17 +425,22 @@ impl StacksChainState { fee: u64, payer_account: StacksAccount, ) -> Result { - let (cur_burn_block_height, v1_unlock_ht) = - clarity_tx.with_clarity_db_readonly(|ref mut db| { + let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht) = clarity_tx + .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height(), db.get_v1_unlock_height(), + db.get_v2_unlock_height(), ) }); let consolidated_balance = payer_account .stx_balance - .get_available_balance_at_burn_block(cur_burn_block_height as u64, v1_unlock_ht); + .get_available_balance_at_burn_block( + cur_burn_block_height as u64, + v1_unlock_ht, + v2_unlock_ht, + ); if consolidated_balance < fee as u128 { return Err(Error::InvalidFee); @@ -7848,7 +7853,7 @@ pub mod test { assert_eq!( StacksChainState::get_account(&mut conn, &addr.into()) .stx_balance - .get_available_balance_at_burn_block(0, 0), + .get_available_balance_at_burn_block(0, 0, 0), (1000000000 - fee) as u128 ); @@ -8286,6 +8291,9 @@ pub mod test { fn get_v1_unlock_height(&self) -> u32 { 2 } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } @@ -8492,6 +8500,9 @@ pub mod test { fn get_v1_unlock_height(&self) -> u32 { 2 } + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } diff --git a/src/chainstate/stacks/tests/block_construction.rs b/src/chainstate/stacks/tests/block_construction.rs index e85f2cdfdc..8596ca892e 100644 --- a/src/chainstate/stacks/tests/block_construction.rs +++ b/src/chainstate/stacks/tests/block_construction.rs @@ -4369,9 +4369,10 @@ fn mempool_incorporate_pox_unlocks() { clarity_tx.with_clarity_db_readonly(|db| { let burn_block_height = 
db.get_current_burnchain_block_height() as u64; let v1_unlock_height = db.get_v1_unlock_height(); + let v2_unlock_height = db.get_v2_unlock_height(); let balance = db.get_account_stx_balance(&principal); info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); - balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height) + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height) }) }).unwrap(); diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index af5da7c107..eef15c6159 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -887,6 +887,34 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } + pub fn initialize_epoch_2_2(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + // This is important because pox-2 is instantiated before costs-3. 
+ self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch22; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch22); + Ok(()) + }) + .unwrap(); + + // require 2.2 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch22; + }); + + debug!("Epoch 2.2 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + pub fn initialize_epoch_2_1(&mut self) -> Result, Error> { // use the `using!` statement to ensure that the old cost_tracker is placed // back in all branches after initialization @@ -2248,6 +2276,11 @@ mod tests { ) -> Option { self.get_stacks_epoch(0) } + + fn get_v2_unlock_height(&self) -> u32 { + u32::max_value() + } + fn get_v1_unlock_height(&self) -> u32 { u32::max_value() } diff --git a/src/clarity_vm/database/mod.rs b/src/clarity_vm/database/mod.rs index 67994c3609..73687f22bd 100644 --- a/src/clarity_vm/database/mod.rs +++ b/src/clarity_vm/database/mod.rs @@ -456,6 +456,10 @@ impl BurnStateDB for SortitionHandleTx<'_> { self.context.pox_constants.v1_unlock_height } + fn get_v2_unlock_height(&self) -> u32 { + self.context.pox_constants.v2_unlock_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } @@ -567,6 +571,10 @@ impl BurnStateDB for SortitionDBConn<'_> { self.context.pox_constants.v1_unlock_height } + fn get_v2_unlock_height(&self) -> u32 { + self.context.pox_constants.v2_unlock_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } diff --git a/src/core/mod.rs b/src/core/mod.rs index 80d85d50e0..921cdd63bc 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -106,12 +106,15 @@ pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; +pub const 
BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; + pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = "000000000000010dd0863ec3d7a0bae17c1957ae1de9cbcdae8e77aad33e3b8c"; pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; +pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_432_000; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -156,6 +159,11 @@ pub const POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT as u32) + 1; +pub const POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT as u32) + 1; +pub const POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT as u32) + 1; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 diff --git a/src/net/inv.rs b/src/net/inv.rs index f11b101eb7..5175fd0926 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -3118,6 +3118,7 @@ mod test { u64::max_value(), u64::max_value(), u32::max_value(), + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); @@ -3145,6 +3146,7 @@ mod test { u64::max_value(), u64::max_value(), u32::max_value(), + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); diff --git a/src/net/mod.rs b/src/net/mod.rs index 76b5a14782..afaaea3fd0 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2484,6 +2484,7 @@ pub mod test { u64::max_value(), u64::max_value(), u32::max_value(), + u32::MAX, ); let mut spending_account = TestMinerFactory::new().next_miner( diff --git a/src/net/rpc.rs 
b/src/net/rpc.rs index fdad5b699d..10543820fa 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -1260,6 +1260,7 @@ impl ConversationHttp { let key = ClarityDatabase::make_key_for_account_balance(&account); let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height(); let (balance, balance_proof) = if with_proof { clarity_db .get_with_proof::(&key) @@ -1285,10 +1286,16 @@ impl ConversationHttp { .unwrap_or_else(|| (0, None)) }; - let unlocked = balance - .get_available_balance_at_burn_block(burn_block_height, v1_unlock_height); - let (locked, unlock_height) = balance - .get_locked_balance_at_burn_block(burn_block_height, v1_unlock_height); + let unlocked = balance.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index d6e4732955..b6087f17c0 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -125,6 +125,7 @@ fn advance_to_2_1( u64::max_value() - 2, u64::max_value() - 1, u32::max_value(), + u32::MAX, )); burnchain_config.pox_constants = pox_constants.clone(); @@ -619,6 +620,7 @@ fn transition_fixes_bitcoin_rigidity() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::max_value(), + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1061,6 +1063,7 @@ fn transition_adds_get_pox_addr_recipients() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1362,6 +1365,7 @@ 
fn transition_adds_mining_from_segwit() { u64::MAX, u64::MAX, v1_unlock_height, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1525,6 +1529,7 @@ fn transition_removes_pox_sunset() { (sunset_start_rc * reward_cycle_len - 1).into(), (sunset_end_rc * reward_cycle_len).into(), (epoch_21 as u32) + 1, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1805,6 +1810,7 @@ fn transition_empty_blocks() { u64::max_value() - 2, u64::max_value() - 1, (epoch_2_1 + 1) as u32, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2162,6 +2168,7 @@ fn test_pox_reorgs_three_flaps() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2697,6 +2704,7 @@ fn test_pox_reorg_one_flap() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3120,6 +3128,7 @@ fn test_pox_reorg_flap_duel() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3553,6 +3562,7 @@ fn test_pox_reorg_flap_reward_cycles() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3980,6 +3990,7 @@ fn test_pox_missing_five_anchor_blocks() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4379,6 +4390,7 @@ fn test_sortition_divergence_pre_21() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4741,6 +4753,7 @@ fn trait_invocation_cross_epoch() { (16 * reward_cycle_len - 1).into(), (17 * 
reward_cycle_len).into(), u32::max_value(), + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4985,6 +4998,7 @@ fn test_v1_unlock_height_with_current_stackers() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height as u32, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5245,6 +5259,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height as u32, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index fed67995f4..5456505a89 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1800,6 +1800,7 @@ fn stx_delegate_btc_integration_test() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5826,6 +5827,7 @@ fn pox_integration_test() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -10459,6 +10461,7 @@ fn test_competing_miners_build_on_same_chain( (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); From a741ee886910f45f83e582409b8c7049778872d5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 20 Apr 2023 07:06:05 -0500 Subject: [PATCH 046/158] refresh epochs table in sortdb and add epochs definition --- src/chainstate/burn/db/sortdb.rs | 42 +++++++++++++++++++++++++++----- src/chainstate/stacks/db/mod.rs | 2 +- src/core/mod.rs | 26 +++++++++++++++++--- 3 files changed, 59 insertions(+), 11 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 296e5f3f57..9a68cf17f7 100644 --- 
a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -501,7 +501,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "4"; +pub const SORTITION_DB_VERSION: &'static str = "5"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -702,6 +702,9 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ );"#, ]; +const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + // update this to add new indexes const LAST_SORTITION_DB_INDEX: &'static str = "index_delegate_stx_burn_header_hash"; @@ -2813,7 +2816,9 @@ impl SortitionDB { } /// Get the schema version of a sortition DB, given the path to it. - /// Returns the version string, if it exists + /// Returns the version string, if it exists. + /// + /// Does **not** migrate the database (like `open()` or `connect()` would) pub fn get_db_version_from_path(path: &str) -> Result, db_error> { if fs::metadata(path).is_err() { return Err(db_error::NoDBError); @@ -2843,11 +2848,17 @@ impl SortitionDB { match epoch { StacksEpochId::Epoch10 => true, StacksEpochId::Epoch20 => { - version == "1" || version == "2" || version == "3" || version == "4" + version == "1" + || version == "2" + || version == "3" + || version == "4" + || version == "5" + } + StacksEpochId::Epoch2_05 => { + version == "2" || version == "3" || version == "4" || version == "5" } - StacksEpochId::Epoch2_05 => version == "2" || version == "3" || version == "4", - StacksEpochId::Epoch21 => version == "3" || version == "4", - StacksEpochId::Epoch22 => version == "4", + StacksEpochId::Epoch21 => version == "3" || version == "4" || version == "5", + StacksEpochId::Epoch22 => version == "3" || version == "4" || version == "5", } } @@ -2916,6 +2927,21 @@ impl SortitionDB { Ok(()) } + fn apply_schema_5(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_5 { + tx.execute_batch(sql_exec)?; + } + + 
SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["5"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { @@ -2954,6 +2980,10 @@ impl SortitionDB { let tx = self.tx_begin()?; SortitionDB::apply_schema_4(&tx.deref())?; tx.commit()?; + } else if version == "4" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_5(&tx.deref(), epochs)?; + tx.commit()?; } else if version == expected_version { return Ok(()); } else { diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index 0578691ed5..00b56892c8 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -223,7 +223,7 @@ impl DBConfig { self.version == "2" || self.version == "3" || self.version == "4" } StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", - StacksEpochId::Epoch22 => self.version == "4", + StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", } } } diff --git a/src/core/mod.rs b/src/core/mod.rs index 921cdd63bc..2b00356682 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -50,13 +50,14 @@ pub use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH // fourth byte == highest epoch supported by this node // - 0x05 for 2.05 // - 0x06 for 2.1 -pub const PEER_VERSION_MAINNET: u32 = 0x18000006; -pub const PEER_VERSION_TESTNET: u32 = 0xfacade06; +pub const PEER_VERSION_MAINNET: u32 = 0x18000007; +pub const PEER_VERSION_TESTNET: u32 = 0xfacade07; pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; +pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; // network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; @@ -105,6 +106,7 @@ pub const 
BITCOIN_MAINNET_FIRST_BLOCK_HASH: &str = pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; +pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; @@ -228,7 +230,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 5] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -253,10 +255,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, ]; } @@ -347,9 +356,18 @@ fn test_ord_for_stacks_epoch() { assert_eq!(epochs[0].cmp(&epochs[0]), Ordering::Equal); assert_eq!(epochs[1].cmp(&epochs[1]), Ordering::Equal); assert_eq!(epochs[2].cmp(&epochs[2]), Ordering::Equal); + assert_eq!(epochs[3].cmp(&epochs[3]), Ordering::Equal); + assert_eq!(epochs[4].cmp(&epochs[4]), Ordering::Equal); assert_eq!(epochs[2].cmp(&epochs[0]), Ordering::Greater); assert_eq!(epochs[2].cmp(&epochs[1]), Ordering::Greater); assert_eq!(epochs[1].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[1]), 
Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[3]), Ordering::Greater); } #[test] From 5503a84b25c856af6a61aefe9df20ab0abf40b6b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Apr 2023 10:44:36 -0500 Subject: [PATCH 047/158] epoch 2.2: disable pox-2 --- src/chainstate/coordinator/mod.rs | 5 ++++ src/clarity_vm/special.rs | 40 ++++++++++++++++++++++++++++ src/core/mod.rs | 2 -- testnet/stacks-node/src/neon_node.rs | 4 +-- 4 files changed, 47 insertions(+), 4 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index f788d71783..187a7dcfe0 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -265,6 +265,11 @@ impl RewardSetProvider for OnChainRewardSetProvider { sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { + if current_burn_height > burnchain.pox_constants.v2_unlock_height as u64 { + info!("PoX reward cycle defaulting to burn in Epoch 2.2"); + return Ok(RewardSet::empty()); + } + let registered_addrs = chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index b02b6aba33..c78ede2b50 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -940,6 +940,32 @@ fn is_pox_v1_read_only(func_name: &str) -> bool { || func_name == "get-pox-info" } +fn is_pox_v2_read_only(func_name: &str) -> bool { + "get-pox-rejection" == func_name + || "is-pox-active" == func_name + || "burn-height-to-reward-cycle" == func_name + || "reward-cycle-to-burn-height" == func_name + || "current-pox-reward-cycle" == func_name + || "get-stacker-info" == func_name + || "get-check-delegation" == func_name + || "get-reward-set-size" == func_name + || "next-cycle-rejection-votes" == func_name + || "get-total-ustx-stacked" == func_name + || "get-reward-set-pox-address" == func_name + || "get-stacking-minimum" == 
func_name + || "check-pox-addr-version" == func_name + || "check-pox-addr-hashbytes" == func_name + || "check-pox-lock-period" == func_name + || "can-stack-stx" == func_name + || "minimal-can-stack-stx" == func_name + || "get-pox-info" == func_name + || "get-delegation-info" == func_name + || "get-allowance-contract-callers" == func_name + || "get-num-reward-set-pox-addresses" == func_name + || "get-partial-stacked-by-cycle" == func_name + || "get-total-pox-rejection" == func_name +} + /// Handle special cases of contract-calls -- namely, those into PoX that should lock up STX pub fn handle_contract_call_special_cases( global_context: &mut GlobalContext, @@ -966,6 +992,20 @@ pub fn handle_contract_call_special_cases( } return handle_pox_v1_api_contract_call(global_context, sender, function_name, result); } else if *contract_id == boot_code_id(POX_2_NAME, global_context.mainnet) { + if !is_pox_v2_read_only(function_name) + && global_context.database.get_v2_unlock_height() + <= global_context.database.get_current_burnchain_block_height() + { + // NOTE: get-pox-info is read-only, so it can call old pox v1 stuff + warn!("PoX-1 function call attempted on an account after v2 unlock height"; + "v2_unlock_ht" => global_context.database.get_v2_unlock_height(), + "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), + "function_name" => function_name, + "contract_id" => %contract_id + ); + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); + } + return handle_pox_v2_api_contract_call( global_context, sender, diff --git a/src/core/mod.rs b/src/core/mod.rs index 2b00356682..1013f9fd5f 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -108,8 +108,6 @@ pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; -pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; - pub const 
BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 90f10c92db..42da5ac074 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -177,7 +177,7 @@ use stacks::chainstate::stacks::{ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::STACKS_EPOCH_2_1_MARKER; +use stacks::core::STACKS_EPOCH_2_2_MARKER; use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -1326,7 +1326,7 @@ impl BlockMinerThread { apparent_sender: sender, key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_2_MARKER], new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, From 730028e3f6f5c3c32fda9f290a07ac929d041410 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Apr 2023 13:03:51 -0500 Subject: [PATCH 048/158] add e2e bitcoin test for epoch-2.2 disabling pox --- src/clarity_vm/special.rs | 6 ++---- testnet/stacks-node/src/tests/mod.rs | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index c78ede2b50..c4ed5b9003 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -992,12 +992,10 @@ pub fn handle_contract_call_special_cases( } return handle_pox_v1_api_contract_call(global_context, sender, function_name, result); } else if *contract_id == boot_code_id(POX_2_NAME, global_context.mainnet) { - if !is_pox_v2_read_only(function_name) - && global_context.database.get_v2_unlock_height() - <= global_context.database.get_current_burnchain_block_height() + if 
!is_pox_v2_read_only(function_name) && global_context.epoch_id >= StacksEpochId::Epoch22 { // NOTE: get-pox-info is read-only, so it can call old pox v1 stuff - warn!("PoX-1 function call attempted on an account after v2 unlock height"; + warn!("PoX-2 function call attempted on an account after Epoch 2.2"; "v2_unlock_ht" => global_context.database.get_v2_unlock_height(), "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), "function_name" => function_name, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 3ee513c24c..766123b6f1 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -42,6 +42,7 @@ mod atlas; mod bitcoin_regtest; mod epoch_205; mod epoch_21; +mod epoch_22; mod integrations; mod mempool; pub mod neon_integrations; From 37cce8b53cccfc7749db16d8ee06d59ac3a02b22 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Apr 2023 13:40:43 -0500 Subject: [PATCH 049/158] use EpochID check for disabling, add (missing) test file --- src/chainstate/coordinator/mod.rs | 9 +- testnet/stacks-node/src/tests/epoch_22.rs | 1087 +++++++++++++++++++++ 2 files changed, 1091 insertions(+), 5 deletions(-) create mode 100644 testnet/stacks-node/src/tests/epoch_22.rs diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 187a7dcfe0..c033254e07 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -265,7 +265,10 @@ impl RewardSetProvider for OnChainRewardSetProvider { sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { - if current_burn_height > burnchain.pox_constants.v2_unlock_height as u64 { + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( + &format!("FATAL: no epoch for burn height {}", current_burn_height), + ); + if cur_epoch.epoch_id >= StacksEpochId::Epoch22 { info!("PoX reward cycle defaulting to burn in Epoch 2.2"); return 
Ok(RewardSet::empty()); } @@ -300,10 +303,6 @@ impl RewardSetProvider for OnChainRewardSetProvider { "registered_addrs" => registered_addrs.len()); } - let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( - &format!("FATAL: no epoch for burn height {}", current_burn_height), - ); - Ok(StacksChainState::make_reward_set( threshold, registered_addrs, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs new file mode 100644 index 0000000000..2735fdf883 --- /dev/null +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -0,0 +1,1087 @@ +use std::collections::HashMap; +use std::env; +use std::thread; + +use stacks::burnchains::Burnchain; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::core::PEER_VERSION_EPOCH_2_2; +use stacks::core::STACKS_EPOCH_MAX; +use stacks::types::chainstate::StacksAddress; + +use crate::config::EventKeyType; +use crate::config::EventObserverConfig; +use crate::config::InitialBalance; +use crate::neon; +use crate::node::get_account_balances; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::neon_integrations::*; +use crate::tests::*; +use crate::BitcoinRegtestController; +use crate::BurnchainController; +use stacks::core; + +use super::neon_integrations::get_account; +use crate::stacks_common::types::Address; +use crate::stacks_common::util::hash::bytes_to_hex; +use stacks::burnchains::PoxConstants; + +use stacks_common::util::hash::Hash160; +use stacks_common::util::secp256k1::Secp256k1PublicKey; + +use stacks::clarity_cli::vm_execute as execute; + +use clarity::vm::types::PrincipalData; +use clarity::vm::ClarityVersion; + +use stacks::util::sleep_ms; + +use stacks::util_lib::boot::boot_code_id; +use stacks_common::types::chainstate::StacksBlockId; + +#[test] +#[ignore] +/// Verify that it is acceptable to launch PoX-2 at the end of a reward cycle, and set v1 unlock 
+/// height to be at the start of the subsequent reward cycle. +/// +/// Verify that PoX-1 stackers continue to receive PoX payouts after v1 unlock height, and that +/// PoX-2 stackers only begin receiving rewards at the start of the reward cycle following the one +/// that contains v1 unlock height. +/// +/// Verify that both of the above work even if miners do not mine in the same block as the PoX-2 +/// start height or v1 unlock height (e.g. suppose there's a delay). +/// +/// Verify the (buggy) stacks-increase behavior in PoX-2, and then verify that Epoch-2.2 +/// **disables** PoX after it activates. +/// +/// Verification works using expected number of slots for burn and various PoX addresses. +/// +fn pox_2_stack_increase_epoch22_fix() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let spender_3_sk = StacksPrivateKey::new(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + increase_by + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + // // create a third initial balance so that there's more liquid ustx than the stacked amount bug. + // // otherwise, it surfaces the DoS vector. 
+ initial_balances.push(InitialBalance { + address: spender_3_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = 
epoch_2_2; + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: epoch_2_2, + end_height: STACKS_EPOCH_MAX, + block_limit: epochs[3].block_limit.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = 
channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(increase_by.into())], + ); + + info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..15 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + 
+ // invoke stack-increase again, in Epoch-2.2, it should + // runtime abort + let aborted_increase_nonce = 3; + let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // finish the cycle after the 2.2 transition, + // and mine two more cycles + for _i in 0..25 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? 
pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + + // let mut have_expected_payout = false; + // if height < epoch_2_1 + (reward_cycle_len as u64) { + // for addr_tuple in pox_addrs { + // // can either pay to pox tuple 1, or burn + // assert_ne!(addr_tuple, pox_addr_tuple_2); + // if addr_tuple == pox_addr_tuple_1 { + // have_expected_payout = true; + // } + // } + // } + // } else { + // if pox_addrs.len() > 0 { + // assert_eq!(pox_addrs.len(), 2); + // for addr_tuple in pox_addrs { + // // can either pay to pox tuple 2, or burn + // assert_ne!(addr_tuple, pox_addr_tuple_1); + // if addr_tuple == pox_addr_tuple_2 { + // have_expected_payout = true; + // } + // } + // } + // } + // assert!(have_expected_payout); + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( 
+ StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet) and 2 burn slots + ( + 24, + HashMap::from([ + (pox_addr_2.clone(), 6u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 2), + ]), + ), + // stack-increase has been invoked, and so the reward set is skewed. + // pox_addr_2 should get the majority of slots (~ 67%) + ( + 25, + HashMap::from([ + (pox_addr_2.clone(), 9u64), + (pox_addr_3.clone(), 4), + (burn_pox_addr.clone(), 1), + ]), + ), + // Epoch 2.2 has started, so the reward set should be fixed. 
+ // pox_addr_2 should get 1 extra slot, because stack-increase + // did increase their stacked amount + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut abort_tested = false; + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == aborted_increase_nonce + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "pox-2"); + assert_eq!(contract_call.function_name.as_str(), "stack-increase"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), "(err none)"); + 
abort_tested = true; + } + } + } + + assert!(abort_tested, "The stack-increase transaction must have been aborted, and it must have been tested in the tx receipts"); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +#[test] +#[ignore] +/// Verify that it is acceptable to launch PoX-2 at the end of a reward cycle, and set v1 unlock +/// height to be at the start of the subsequent reward cycle. +/// +/// Verify that PoX-1 stackers continue to receive PoX payouts after v1 unlock height, and that +/// PoX-2 stackers only begin receiving rewards at the start of the reward cycle following the one +/// that contains v1 unlock height. +/// +/// Verify that both of the above work even if miners do not mine in the same block as the PoX-2 +/// start height or v1 unlock height (e.g. suppose there's a delay). +/// +/// Verify that pox-2 locked funds unlock in Epoch-2.2 +/// +fn pox_2_unlock_all() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 5; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 222; + let v1_unlock_height = epoch_2_1 + 1; + let epoch_2_2 = 239; // one block before a prepare phase + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = 
bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: epoch_2_2, + end_height: STACKS_EPOCH_MAX, + block_limit: epochs[3].block_limit.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }); + conf.burnchain.epochs = Some(epochs); + + let mut 
burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = 
channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + + // advance to 1 block before 2.2 activation + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_2 - 1 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + // finish the cycle after the 2.2 transition, + // and mine two more cycles + for _i in 0..14 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: 
failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = 
*reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet) and 2 burn slots + ( + 24, + HashMap::from([ + (pox_addr_2.clone(), 6u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 2), + ]), + ), + // stack-increase has been invoked, and so the reward set is skewed. + // pox_addr_2 should get the majority of slots (~ 67%) + ( + 25, + HashMap::from([ + (pox_addr_2.clone(), 9u64), + (pox_addr_3.clone(), 4), + (burn_pox_addr.clone(), 1), + ]), + ), + // Epoch 2.2 has started, so the reward set should be fixed. 
+ // pox_addr_2 should get 1 extra slot, because stack-increase + // did increase their stacked amount + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + // assert_eq!( + // *slots, + // expected_slots[&reward_cycle][&pox_addr], + // "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + // &pox_addr, + // reward_cycle, + // ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut abort_tested = false; + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + } + } + + assert!(abort_tested, "The stack-increase transaction must have been aborted, and it must have been tested in the tx receipts"); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} From 466d38741464ebf84fec60d2a365b3a3991177fb Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Apr 2023 15:46:13 -0500 Subject: [PATCH 050/158] add transfer, lookup, mempool checks to pox_2_unlock_all --- .github/workflows/bitcoin-tests.yml | 2 + testnet/stacks-node/src/tests/epoch_22.rs | 207 ++++++++++++++++------ 2 files 
changed, 158 insertions(+), 51 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index cdd494e598..3886682b54 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,8 @@ jobs: - tests::epoch_21::test_v1_unlock_height_with_current_stackers - tests::epoch_21::test_v1_unlock_height_with_delay_and_current_stackers - tests::epoch_21::trait_invocation_cross_epoch + - tests::epoch_22::pox_2_unlock_all + - tests::epoch_22::disable_pox - tests::neon_integrations::bad_microblock_pubkey steps: - uses: actions/checkout@v2 diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 2735fdf883..21cd425a71 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -56,7 +56,7 @@ use stacks_common::types::chainstate::StacksBlockId; /// /// Verification works using expected number of slots for burn and various PoX addresses. 
/// -fn pox_2_stack_increase_epoch22_fix() { +fn disable_pox() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -640,8 +640,15 @@ fn pox_2_unlock_all() { let spender_2_sk = StacksPrivateKey::new(); let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + let spender_3_sk = StacksPrivateKey::new(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); + let mut initial_balances = vec![]; + let spender_1_initial_balance = stacked + 100_000; + let spender_2_initial_balance = stacked + 100_000; + let tx_fee = 3000; + initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, @@ -797,7 +804,7 @@ fn pox_2_unlock_all() { let tx = make_contract_call( &spender_sk, 0, - 3000, + tx_fee, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox", "stack-stx", @@ -845,7 +852,7 @@ fn pox_2_unlock_all() { let tx = make_contract_call( &spender_sk, 1, - 3000, + tx_fee, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -863,7 +870,7 @@ fn pox_2_unlock_all() { let tx = make_contract_call( &spender_2_sk, 0, - 3000, + tx_fee, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -890,17 +897,134 @@ fn pox_2_unlock_all() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } + // this block activates 2.2 + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + // this *burn block* is when the unlock occurs + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); info!("spender_1_account = {:?}", spender_1_account); info!("spender_2_account = {:?}", spender_1_account); + assert_eq!( + spender_1_account.balance as u64, + spender_1_initial_balance - stacked - (2 * tx_fee), + "Spender 1 should still be locked" + ); 
+ assert_eq!( + spender_1_account.locked as u64, stacked, + "Spender 1 should still be locked" + ); + assert_eq!( + spender_1_account.nonce, 2, + "Spender 1 should have two accepted transactions" + ); + + assert_eq!( + spender_2_account.balance as u64, + spender_2_initial_balance - stacked - (1 * tx_fee), + "Spender 2 should still be locked" + ); + assert_eq!( + spender_2_account.locked as u64, stacked, + "Spender 2 should still be locked" + ); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + + // and this block is the first block whose parent has >= unlock burn block + // (which is the criterion for the unlock) next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + assert_eq!( + spender_1_account.balance, + spender_1_initial_balance as u128 - (2 * tx_fee as u128), + "Spender 1 should be unlocked" + ); + assert_eq!(spender_1_account.locked, 0, "Spender 1 should be unlocked"); + assert_eq!( + spender_1_account.nonce, 2, + "Spender 1 should have two accepted transactions" + ); + + assert_eq!( + spender_2_account.balance, + spender_2_initial_balance as u128 - (1 * tx_fee as u128), + "Spender 2 should be unlocked" + ); + assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + + // perform a transfer + let tx = make_stacks_transfer(&spender_sk, 2, tx_fee, &spender_3_addr, 1_000_000); + + info!("Submit stack transfer tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // this wakes up the node to mine the transaction + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + // this block selects the 
previously mined block + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + let spender_3_account = get_account(&http_origin, &spender_3_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + assert_eq!( + spender_3_account.balance, 1_000_000, + "Recipient account should have funds" + ); + assert_eq!( + spender_3_account.locked, 0, + "Burn account should be unlocked" + ); + assert_eq!( + spender_3_account.nonce, 0, + "Burn should have no accepted transactions" + ); + + assert_eq!( + spender_1_account.balance, + spender_1_initial_balance as u128 - (3 * tx_fee as u128) - 1_000_000, + "Spender 1 should be unlocked" + ); + assert_eq!(spender_1_account.locked, 0, "Spender 1 should be unlocked"); + assert_eq!( + spender_1_account.nonce, 3, + "Spender 1 should have three accepted transactions" + ); + + assert_eq!( + spender_2_account.balance, + spender_2_initial_balance as u128 - (1 * tx_fee as u128), + "Spender 2 should be unlocked" + ); + assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + // finish the cycle after the 2.2 transition, // and mine two more cycles - for _i in 0..14 { + for _i in 0..10 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let tip_info = get_chain_info(&conf); if tip_info.burn_block_height > last_block_height { @@ -1010,61 +1134,44 @@ fn pox_2_unlock_all() { ); let expected_slots = HashMap::from([ + (42u64, HashMap::from([(pox_addr_1.clone(), 4u64)])), + (43, HashMap::from([(pox_addr_1.clone(), 4)])), + (44, HashMap::from([(pox_addr_1.clone(), 4)])), + // cycle 45 is the first 2.1, and in the setup of this test, there's not + // enough time for the stackers to begin in 
this cycle + (45, HashMap::from([(burn_pox_addr.clone(), 4)])), + (46, HashMap::from([(burn_pox_addr.clone(), 4)])), ( - 21u64, - HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), - ), - ( - 22u64, - HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), - ), - ( - 23u64, - HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), - ), - // cycle 24 is the first 2.1, it should have pox_2 and pox_3 with equal - // slots (because increase hasn't gone into effect yet) and 2 burn slots - ( - 24, - HashMap::from([ - (pox_addr_2.clone(), 6u64), - (pox_addr_3.clone(), 6), - (burn_pox_addr.clone(), 2), - ]), - ), - // stack-increase has been invoked, and so the reward set is skewed. - // pox_addr_2 should get the majority of slots (~ 67%) - ( - 25, - HashMap::from([ - (pox_addr_2.clone(), 9u64), - (pox_addr_3.clone(), 4), - (burn_pox_addr.clone(), 1), - ]), + 47, + HashMap::from([(pox_addr_2.clone(), 2), (pox_addr_3.clone(), 2)]), ), - // Epoch 2.2 has started, so the reward set should be fixed. - // pox_addr_2 should get 1 extra slot, because stack-increase - // did increase their stacked amount - (26, HashMap::from([(burn_pox_addr.clone(), 14)])), - (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + // Now 2.2 is active, everything should be a burn. 
+ (48, HashMap::from([(burn_pox_addr.clone(), 4)])), + (49, HashMap::from([(burn_pox_addr.clone(), 4)])), + (50, HashMap::from([(burn_pox_addr.clone(), 4)])), ]); for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { - let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { + Some(x) => x, + None => { + info!("No reward cycle entry = {}", reward_cycle); + continue; + } + }; assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); for (pox_addr, slots) in cycle_counts.iter() { - // assert_eq!( - // *slots, - // expected_slots[&reward_cycle][&pox_addr], - // "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - // &pox_addr, - // reward_cycle, - // ); + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } } - let mut abort_tested = false; let blocks = test_observer::get_blocks(); for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -1080,8 +1187,6 @@ fn pox_2_unlock_all() { } } - assert!(abort_tested, "The stack-increase transaction must have been aborted, and it must have been tested in the tx receipts"); - test_observer::clear(); channel.stop_chains_coordinator(); } From 78fc3f831c258fceb16baca22166e701b1548e87 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Apr 2023 11:18:10 -0500 Subject: [PATCH 051/158] choose 2_431_300 as testnet 2.2 height --- src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 1013f9fd5f..ce6ed3017e 100644 
--- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -114,7 +114,7 @@ pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = "000000000000010dd0863ec3d7a0bae17c1957ae1de9cbcdae8e77aad33e3b8c"; pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; -pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_432_000; +pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; From 4a3ef720e87fd0561ad84b8ef65493c4aab591c9 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Apr 2023 14:45:09 -0500 Subject: [PATCH 052/158] activate 2.2 on testnet --- src/core/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index ce6ed3017e..d902c5f276 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -268,7 +268,7 @@ lazy_static! { } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 5] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -297,6 +297,13 @@ lazy_static! 
{ block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, ]; } From 1d09e2fe446b66b75af28fd1a3a281075a497aa2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Apr 2023 14:51:55 -0500 Subject: [PATCH 053/158] address refactoring review comments --- src/clarity_vm/clarity.rs | 55 +++++++++++------------ src/core/mod.rs | 2 +- testnet/stacks-node/src/tests/epoch_22.rs | 24 ---------- 3 files changed, 28 insertions(+), 53 deletions(-) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index eef15c6159..071185650b 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -887,34 +887,6 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } - pub fn initialize_epoch_2_2(&mut self) -> Result, Error> { - // use the `using!` statement to ensure that the old cost_tracker is placed - // back in all branches after initialization - using!(self.cost_track, "cost tracker", |old_cost_tracker| { - // epoch initialization is *free*. - // NOTE: this also means that cost functions won't be evaluated. - // This is important because pox-2 is instantiated before costs-3. 
- self.cost_track.replace(LimitedCostTracker::new_free()); - self.epoch = StacksEpochId::Epoch22; - self.as_transaction(|tx_conn| { - // bump the epoch in the Clarity DB - tx_conn - .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch22); - Ok(()) - }) - .unwrap(); - - // require 2.2 rules henceforth in this connection as well - tx_conn.epoch = StacksEpochId::Epoch22; - }); - - debug!("Epoch 2.2 initialized"); - - (old_cost_tracker, Ok(vec![])) - }) - } - pub fn initialize_epoch_2_1(&mut self) -> Result, Error> { // use the `using!` statement to ensure that the old cost_tracker is placed // back in all branches after initialization @@ -1110,6 +1082,33 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } + pub fn initialize_epoch_2_2(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. 
+ self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch22; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch22); + Ok(()) + }) + .unwrap(); + + // require 2.2 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch22; + }); + + debug!("Epoch 2.2 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; diff --git a/src/core/mod.rs b/src/core/mod.rs index d902c5f276..a87ffc273f 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -649,7 +649,7 @@ impl StacksEpochExtension for StacksEpoch { read_count: 210210, runtime: 210210, }, - network_epoch: PEER_VERSION_EPOCH_2_1, + network_epoch: PEER_VERSION_EPOCH_2_2, }, ] } diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 21cd425a71..494166bed3 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -462,30 +462,6 @@ fn disable_pox() { .insert(pox_addr_1, 1); } } - - // let mut have_expected_payout = false; - // if height < epoch_2_1 + (reward_cycle_len as u64) { - // for addr_tuple in pox_addrs { - // // can either pay to pox tuple 1, or burn - // assert_ne!(addr_tuple, pox_addr_tuple_2); - // if addr_tuple == pox_addr_tuple_1 { - // have_expected_payout = true; - // } - // } - // } - // } else { - // if pox_addrs.len() > 0 { - // assert_eq!(pox_addrs.len(), 2); - // for addr_tuple in pox_addrs { - // // can either pay to pox tuple 2, or burn - // assert_ne!(addr_tuple, pox_addr_tuple_1); - // if addr_tuple == pox_addr_tuple_2 { - // have_expected_payout = true; - // } - // } - // } - // } - // assert!(have_expected_payout); } } From 9c95b4fe6274028512e1b9c97370c3ac51c37e9a Mon 
Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Apr 2023 16:34:39 -0500 Subject: [PATCH 054/158] fix testnet 2.1 end height --- src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index a87ffc273f..cd8b12873d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -293,7 +293,7 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, From f9e85d292467d11498d7385f832c5b78947a1fc6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 10:35:13 -0500 Subject: [PATCH 055/158] add pox_v2_unlock_height to "/new_block" event --- testnet/stacks-node/src/event_dispatcher.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 137ed6f3b0..0c49b7f2fa 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -394,6 +394,7 @@ impl EventObserver { "anchored_cost": anchored_consumed, "confirmed_microblocks_cost": mblock_confirmed_consumed, "pox_v1_unlock_height": pox_constants.v1_unlock_height, + "pox_v2_unlock_height": pox_constants.v2_unlock_height, }) } } From db007310e10c3f7eaa7935a369007f222a09cd7e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 14:15:41 -0500 Subject: [PATCH 056/158] fix comments from PR review --- src/clarity_vm/special.rs | 1 - testnet/stacks-node/src/tests/epoch_22.rs | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index c4ed5b9003..4c88dcb98a 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -994,7 +994,6 @@ pub fn handle_contract_call_special_cases( } else if *contract_id == 
boot_code_id(POX_2_NAME, global_context.mainnet) { if !is_pox_v2_read_only(function_name) && global_context.epoch_id >= StacksEpochId::Epoch22 { - // NOTE: get-pox-info is read-only, so it can call old pox v1 stuff warn!("PoX-2 function call attempted on an account after Epoch 2.2"; "v2_unlock_ht" => global_context.database.get_v2_unlock_height(), "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 494166bed3..7ffcc1b58e 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -521,9 +521,7 @@ fn disable_pox() { (burn_pox_addr.clone(), 1), ]), ), - // Epoch 2.2 has started, so the reward set should be fixed. - // pox_addr_2 should get 1 extra slot, because stack-increase - // did increase their stacked amount + // Epoch 2.2 has started, so the reward set should be all burns. (26, HashMap::from([(burn_pox_addr.clone(), 14)])), (27, HashMap::from([(burn_pox_addr.clone(), 14)])), ]); From 86dfaeb11cf8b4adc4ef15b531d58cbda45caddf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 14:21:04 -0500 Subject: [PATCH 057/158] set epoch-2.2 activation height according to sip-022 --- src/core/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index cd8b12873d..5bc8d7753c 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -106,7 +106,8 @@ pub const BITCOIN_MAINNET_FIRST_BLOCK_HASH: &str = pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; -pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_700; +/// This is Epoch-2.2 activation height proposed in SIP-022 +pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: 
u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From cfeb5849a10cf8d6aa19d9ed854167b11fd4f022 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 14:30:46 -0500 Subject: [PATCH 058/158] upgrade debian dockerfiles from stretch (eol) to buster (current stable) --- Dockerfile.stretch | 4 ++-- build-scripts/Dockerfile.linux-arm64 | 2 +- build-scripts/Dockerfile.linux-armv7 | 2 +- build-scripts/Dockerfile.linux-musl-x64 | 2 +- build-scripts/Dockerfile.linux-x64 | 2 +- build-scripts/Dockerfile.macos-arm64 | 2 +- build-scripts/Dockerfile.macos-x64 | 2 +- build-scripts/Dockerfile.windows-x64 | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfile.stretch b/Dockerfile.stretch index 7f5148dfec..45b8f9387f 100644 --- a/Dockerfile.stretch +++ b/Dockerfile.stretch @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -14,7 +14,7 @@ RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json - RUN cp target/release/stacks-node /out -FROM debian:stretch-slim +FROM debian:buster-slim RUN apt update && apt install -y netcat COPY --from=build /out/ /bin/ diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 index 7acc30f6bf..f74f5ada08 100644 --- a/build-scripts/Dockerfile.linux-arm64 +++ b/build-scripts/Dockerfile.linux-arm64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 index 9fb50d18bc..f882221a4e 100644 --- a/build-scripts/Dockerfile.linux-armv7 +++ b/build-scripts/Dockerfile.linux-armv7 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git 
a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index 9c6c604341..a6ae3edc6a 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 index b4abb08aed..3c3e92b61b 100644 --- a/build-scripts/Dockerfile.linux-x64 +++ b/build-scripts/Dockerfile.linux-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index 56cfe684a3..f56dd2e25a 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -1,4 +1,4 @@ -FROM rust:bullseye as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index 29038b6967..c4fe7a5a58 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -1,4 +1,4 @@ -FROM rust:bullseye as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 58785ccba7..1063154934 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' From e4976c87ee3a0d322ee32209726e9732184734c3 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 14:54:20 -0500 Subject: [PATCH 059/158] disable macos builds until osxcross sources can be updated --- 
.github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1a04a194f..cba92ecc46 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -114,8 +114,9 @@ jobs: platform: [ windows-x64, - macos-x64, - macos-arm64, + # disable mac builds until osxcross can be updated for buster + # macos-x64, + # macos-arm64, linux-x64, linux-musl-x64, linux-armv7, From a27e10a0445eed6bf2071ccc295191b2fdfb3106 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 15:02:56 -0500 Subject: [PATCH 060/158] disable windows builds until cross-compilation sources can be updated --- .github/workflows/ci.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cba92ecc46..5ebd947d84 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -113,8 +113,8 @@ jobs: matrix: platform: [ - windows-x64, - # disable mac builds until osxcross can be updated for buster + # disable mac, windows builds until osxcross can be updated for buster + # windows-x64, # macos-x64, # macos-arm64, linux-x64, @@ -303,9 +303,10 @@ jobs: matrix: platform: [ - windows-x64, - macos-x64, - macos-arm64, + # disable mac, windows builds until osxcross can be updated for buster +# windows-x64, +# macos-x64, +# macos-arm64, linux-x64, linux-musl-x64, linux-armv7, From 51a019cd1ec83c4832da56acacd4ffdc8e957b2a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 25 Apr 2023 14:30:46 -0500 Subject: [PATCH 061/158] upgrade debian dockerfiles from stretch (eol) to buster (current stable) disable macos builds until osxcross sources can be updated disable windows builds until cross-compilation sources can be updated --- .github/workflows/ci.yml | 14 ++++++++------ Dockerfile.stretch | 4 ++-- build-scripts/Dockerfile.linux-arm64 | 2 +- build-scripts/Dockerfile.linux-armv7 | 2 +- 
build-scripts/Dockerfile.linux-musl-x64 | 2 +- build-scripts/Dockerfile.linux-x64 | 2 +- build-scripts/Dockerfile.macos-arm64 | 2 +- build-scripts/Dockerfile.macos-x64 | 2 +- build-scripts/Dockerfile.windows-x64 | 2 +- 9 files changed, 17 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1a04a194f..5ebd947d84 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -113,9 +113,10 @@ jobs: matrix: platform: [ - windows-x64, - macos-x64, - macos-arm64, + # disable mac, windows builds until osxcross can be updated for buster + # windows-x64, + # macos-x64, + # macos-arm64, linux-x64, linux-musl-x64, linux-armv7, @@ -302,9 +303,10 @@ jobs: matrix: platform: [ - windows-x64, - macos-x64, - macos-arm64, + # disable mac, windows builds until osxcross can be updated for buster +# windows-x64, +# macos-x64, +# macos-arm64, linux-x64, linux-musl-x64, linux-armv7, diff --git a/Dockerfile.stretch b/Dockerfile.stretch index 7f5148dfec..45b8f9387f 100644 --- a/Dockerfile.stretch +++ b/Dockerfile.stretch @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -14,7 +14,7 @@ RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json - RUN cp target/release/stacks-node /out -FROM debian:stretch-slim +FROM debian:buster-slim RUN apt update && apt install -y netcat COPY --from=build /out/ /bin/ diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 index 7acc30f6bf..f74f5ada08 100644 --- a/build-scripts/Dockerfile.linux-arm64 +++ b/build-scripts/Dockerfile.linux-arm64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 index 9fb50d18bc..f882221a4e 100644 --- 
a/build-scripts/Dockerfile.linux-armv7 +++ b/build-scripts/Dockerfile.linux-armv7 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index 9c6c604341..a6ae3edc6a 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 index b4abb08aed..3c3e92b61b 100644 --- a/build-scripts/Dockerfile.linux-x64 +++ b/build-scripts/Dockerfile.linux-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index 56cfe684a3..f56dd2e25a 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -1,4 +1,4 @@ -FROM rust:bullseye as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index 29038b6967..c4fe7a5a58 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -1,4 +1,4 @@ -FROM rust:bullseye as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 58785ccba7..1063154934 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:buster as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch 
Info' From 50fc803435e34b4b95026eca614651cce433c951 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 11:20:49 -0500 Subject: [PATCH 062/158] ci: use stable (bullseye) for debian dockerfiles --- .github/workflows/ci.yml | 10 +++++----- .github/workflows/docker-platforms.yml | 2 +- Dockerfile.stretch => Dockerfile.debian | 4 ++-- build-scripts/Dockerfile.linux-arm64 | 2 +- build-scripts/Dockerfile.linux-armv7 | 2 +- build-scripts/Dockerfile.linux-musl-x64 | 2 +- build-scripts/Dockerfile.linux-x64 | 2 +- build-scripts/Dockerfile.macos-arm64 | 2 +- build-scripts/Dockerfile.macos-x64 | 2 +- build-scripts/Dockerfile.windows-x64 | 2 +- 10 files changed, 15 insertions(+), 15 deletions(-) rename Dockerfile.stretch => Dockerfile.debian (87%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5ebd947d84..0ec84fe2e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -113,8 +113,8 @@ jobs: matrix: platform: [ - # disable mac, windows builds until osxcross can be updated for buster - # windows-x64, + # disable mac builds until osxcross can be updated for bullseye + windows-x64, # macos-x64, # macos-arm64, linux-x64, @@ -258,7 +258,7 @@ jobs: uses: docker/build-push-action@v2 with: platforms: linux/amd64 - file: Dockerfile.stretch + file: Dockerfile.debian tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | @@ -303,8 +303,8 @@ jobs: matrix: platform: [ - # disable mac, windows builds until osxcross can be updated for buster -# windows-x64, + # disable mac builds until osxcross can be updated for bullseye + windows-x64, # macos-x64, # macos-arm64, linux-x64, diff --git a/.github/workflows/docker-platforms.yml b/.github/workflows/docker-platforms.yml index 7ee44b3eda..d8d7efa4c3 100644 --- a/.github/workflows/docker-platforms.yml +++ b/.github/workflows/docker-platforms.yml @@ -115,7 +115,7 @@ jobs: uses: docker/build-push-action@v2 with: platforms: ${{ env.BUILD_PLATFORMS }} - 
file: Dockerfile.stretch + file: Dockerfile.debian tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | diff --git a/Dockerfile.stretch b/Dockerfile.debian similarity index 87% rename from Dockerfile.stretch rename to Dockerfile.debian index 45b8f9387f..4b9a56b8c5 100644 --- a/Dockerfile.stretch +++ b/Dockerfile.debian @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -14,7 +14,7 @@ RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json - RUN cp target/release/stacks-node /out -FROM debian:buster-slim +FROM debian:bullseye-slim RUN apt update && apt install -y netcat COPY --from=build /out/ /bin/ diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 index f74f5ada08..072fee58b7 100644 --- a/build-scripts/Dockerfile.linux-arm64 +++ b/build-scripts/Dockerfile.linux-arm64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 index f882221a4e..d871c03dc0 100644 --- a/build-scripts/Dockerfile.linux-armv7 +++ b/build-scripts/Dockerfile.linux-armv7 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index a6ae3edc6a..c0a31b190d 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 index 3c3e92b61b..b451b6f427 100644 --- 
a/build-scripts/Dockerfile.linux-x64 +++ b/build-scripts/Dockerfile.linux-x64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index f56dd2e25a..56cfe684a3 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index c4fe7a5a58..29038b6967 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 1063154934..923217b122 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -1,4 +1,4 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' From a376fecfbd7b6845c8b72794dcf1ec1638363b87 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 11:26:04 -0500 Subject: [PATCH 063/158] update stale comment --- clarity/src/vm/database/clarity_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 9b5e124e78..950ccd2910 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -761,7 +761,7 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_v1_unlock_height() } - /// Return the height for PoX v1 -> v2 auto unlocks + /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn 
state db pub fn get_v2_unlock_height(&self) -> u32 { self.burn_state_db.get_v2_unlock_height() From d7827f7cfd20346b67055a3330d4dfbdfa8a0c07 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 15:23:22 -0500 Subject: [PATCH 064/158] add test, address PR comment feedback --- .github/workflows/bitcoin-tests.yml | 1 + src/chainstate/burn/db/sortdb.rs | 6 + testnet/stacks-node/src/tests/epoch_21.rs | 2 +- testnet/stacks-node/src/tests/epoch_22.rs | 440 ++++++++++++++++++++++ 4 files changed, 448 insertions(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 3886682b54..66adabc451 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -100,6 +100,7 @@ jobs: - tests::epoch_21::trait_invocation_cross_epoch - tests::epoch_22::pox_2_unlock_all - tests::epoch_22::disable_pox + - tests::epoch_22::test_pox_reorg_one_flap - tests::neon_integrations::bad_microblock_pubkey steps: - uses: actions/checkout@v2 diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 9a68cf17f7..c0eae3046e 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -702,6 +702,8 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ );"#, ]; +/// The changes for version five *just* replace the existing epochs table +/// by deleting all the current entries and inserting the new epochs definition. 
const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" DELETE FROM epochs;"#]; @@ -2660,6 +2662,7 @@ impl SortitionDB { SortitionDB::apply_schema_2(&db_tx, epochs_ref)?; SortitionDB::apply_schema_3(&db_tx)?; SortitionDB::apply_schema_4(&db_tx)?; + SortitionDB::apply_schema_5(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2928,6 +2931,9 @@ impl SortitionDB { } fn apply_schema_5(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + // the schema 5 changes simply **replace** the contents of the epochs table + // by dropping all the current rows and then revalidating and inserting + // `epochs` for sql_exec in SORTITION_DB_SCHEMA_5 { tx.execute_batch(sql_exec)?; } diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index b6087f17c0..a996fda07a 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1985,7 +1985,7 @@ fn transition_empty_blocks() { } /// Check to see if there are stragglers between a set of nodes syncing -fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: u64) { +pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: u64) { loop { let mut straggler = false; let mut stacks_tip_ch = None; diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 7ffcc1b58e..3c3ebb7b51 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -5,16 +5,21 @@ use std::thread; use stacks::burnchains::Burnchain; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::signal_mining_blocked; +use stacks::chainstate::stacks::miner::signal_mining_ready; use stacks::core::PEER_VERSION_EPOCH_2_2; use stacks::core::STACKS_EPOCH_MAX; use stacks::types::chainstate::StacksAddress; +use stacks::types::PrivateKey; use 
crate::config::EventKeyType; use crate::config::EventObserverConfig; use crate::config::InitialBalance; use crate::neon; +use crate::neon_node::StacksNode; use crate::node::get_account_balances; use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::epoch_21::wait_pox_stragglers; use crate::tests::neon_integrations::*; use crate::tests::*; use crate::BitcoinRegtestController; @@ -1164,3 +1169,438 @@ fn pox_2_unlock_all() { test_observer::clear(); channel.stop_chains_coordinator(); } + +/// PoX reorg with just one flap. Epoch 2.2 activates during bootup +/// Miner 0 mines and hides the anchor block for cycle 22. +/// Miner 1 mines and hides the anchor block for cycle 23, causing a PoX reorg in miner 0. +/// At the very end, miners stop hiding their blocks, and the test verifies that both miners +/// converge on having anchor blocks for cycles 22 and 24, but not 23. +#[test] +#[ignore] +fn test_pox_reorg_one_flap() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_miners = 2; + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let v1_unlock_height = 152; + let epoch_2_2 = 175; + let v2_unlock_height = epoch_2_2 + 1; + + let (mut conf_template, _) = neon_integration_test_conf(); + let block_time_ms = 10_000; + conf_template.node.mine_microblocks = true; + conf_template.miner.microblock_attempt_time_ms = 2_000; + conf_template.node.wait_time_for_microblocks = 0; + conf_template.node.microblock_frequency = 0; + conf_template.miner.first_attempt_time_ms = 2_000; + conf_template.miner.subsequent_attempt_time_ms = 5_000; + conf_template.burnchain.max_rbf = 1000000; + conf_template.node.wait_time_for_blocks = 1_000; + conf_template.burnchain.pox_2_activation = Some(v1_unlock_height); + + conf_template.node.require_affirmed_anchor_blocks = false; + + // make epoch 2.1 and 2.2 start in the middle of boot-up + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = 101; + 
epochs[2].start_height = 101; + epochs[2].end_height = 151; + epochs[3].start_height = 151; + epochs[3].end_height = epoch_2_2; + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: epoch_2_2, + end_height: STACKS_EPOCH_MAX, + block_limit: epochs[3].block_limit.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }); + + conf_template.burnchain.epochs = Some(epochs); + + let privks: Vec<_> = (0..5) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + + let stack_privks: Vec<_> = (0..5) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + + let balances: Vec<_> = privks + .iter() + .map(|privk| { + let addr = to_addr(privk); + InitialBalance { + address: addr.into(), + amount: 30_000_000, + } + }) + .collect(); + + let stack_balances: Vec<_> = stack_privks + .iter() + .map(|privk| { + let addr = to_addr(privk); + InitialBalance { + address: addr.into(), + amount: 2_000_000_000_000_000, + } + }) + .collect(); + + let mut confs = vec![]; + let mut burnchain_configs = vec![]; + let mut blocks_processed = vec![]; + let mut channels = vec![]; + let mut miner_status = vec![]; + + for i in 0..num_miners { + let seed = StacksPrivateKey::new().to_bytes(); + let (mut conf, _) = neon_integration_test_conf_with_seed(seed); + + conf.initial_balances.clear(); + conf.initial_balances.append(&mut balances.clone()); + conf.initial_balances.append(&mut stack_balances.clone()); + + conf.node.mine_microblocks = conf_template.node.mine_microblocks; + conf.miner.microblock_attempt_time_ms = conf_template.miner.microblock_attempt_time_ms; + conf.node.wait_time_for_microblocks = conf_template.node.wait_time_for_microblocks; + conf.node.microblock_frequency = conf_template.node.microblock_frequency; + conf.miner.first_attempt_time_ms = conf_template.miner.first_attempt_time_ms; + conf.miner.subsequent_attempt_time_ms = conf_template.miner.subsequent_attempt_time_ms; + conf.node.wait_time_for_blocks = 
conf_template.node.wait_time_for_blocks; + conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; + conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.node.require_affirmed_anchor_blocks = + conf_template.node.require_affirmed_anchor_blocks; + + // multiple nodes so they must download from each other + conf.miner.wait_for_block_download = true; + + // nodes will selectively hide blocks from one another + conf.node.fault_injection_hide_blocks = true; + + let rpc_port = 41063 + 10 * i; + let p2p_port = 41063 + 10 * i + 1; + conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); + conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); + conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + + confs.push(conf); + } + + let node_privkey_1 = + StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); + for i in 1..num_miners { + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + + confs[i].node.set_bootstrap_nodes( + format!( + "{}@{}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex(), + p2p_bind + ), + chain_id, + peer_version, + ); + } + + // use short reward cycles + for i in 0..num_miners { + let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + (1600 * reward_cycle_len - 1).into(), + (1700 * reward_cycle_len).into(), + v1_unlock_height, + v2_unlock_height.try_into().unwrap(), + ); + burnchain_config.pox_constants = pox_constants.clone(); + + burnchain_configs.push(burnchain_config); + } + + let mut btcd_controller = BitcoinCoreController::new(confs[0].clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut 
btc_regtest_controller = BitcoinRegtestController::with_burnchain( + confs[0].clone(), + None, + Some(burnchain_configs[0].clone()), + None, + ); + + btc_regtest_controller.bootstrap_chain(1); + + // make sure all miners have BTC + for i in 1..num_miners { + let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); + btc_regtest_controller + .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + btc_regtest_controller.bootstrap_chain(1); + btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); + } + + btc_regtest_controller.bootstrap_chain((199 - num_miners) as u64); + + eprintln!("Chain bootstrapped..."); + + for (i, burnchain_config) in burnchain_configs.into_iter().enumerate() { + let mut run_loop = neon::RunLoop::new(confs[i].clone()); + let blocks_processed_arc = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); + let this_miner_status = run_loop.get_miner_status(); + + blocks_processed.push(blocks_processed_arc); + channels.push(channel); + miner_status.push(this_miner_status); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + } + + let http_origin = format!("http://{}", &confs[0].node.rpc_bind); + + // give the run loops some time to start up! 
+ for i in 0..num_miners { + wait_for_runloop(&blocks_processed[i as usize]); + } + + // activate miners + eprintln!("\n\nBoot miner 0\n\n"); + loop { + let tip_info_opt = get_chain_info_opt(&confs[0]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner 0...\n\n"); + } + next_block_and_iterate( + &mut btc_regtest_controller, + &blocks_processed[0], + block_time_ms, + ); + } + + for i in 1..num_miners { + eprintln!("\n\nBoot miner {}\n\n", i); + loop { + let tip_info_opt = get_chain_info_opt(&confs[i]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner {}...\n\n", i); + } + next_block_and_iterate( + &mut btc_regtest_controller, + &blocks_processed[i as usize], + 5_000, + ); + } + } + + eprintln!("\n\nBegin transactions\n\n"); + + let pox_pubkey = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey) + .to_bytes() + .to_vec(), + ); + + let sort_height = channels[0].get_sortitions_processed(); + + // make everyone stack + let stacking_txs: Vec<_> = stack_privks + .iter() + .enumerate() + .map(|(_i, pk)| { + make_contract_call( + pk, + 0, + 1360, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(2_000_000_000_000_000 - 30_000_000), + execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt((sort_height + 1) as u128), + Value::UInt(12), + ], + ) + }) + .collect(); + + // keeps the mempool full, and makes it so miners will spend a nontrivial amount of time + // building blocks + let all_txs: 
Vec<_> = privks + .iter() + .enumerate() + .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .collect(); + + // everyone locks up + let mut cnt = 0; + for tx in stacking_txs { + eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + submit_tx(&http_origin, &tx); + cnt += 1; + } + + // run a reward cycle + let mut at_220 = false; + while !at_220 { + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + if tip_info.burn_block_height == 220 { + at_220 = true; + } + } + } + + // blast out the rest + let mut cnt = 0; + for tx_chain in all_txs { + for tx in tx_chain { + eprintln!("\n\nSubmit tx {}\n\n", &cnt); + submit_tx(&http_origin, &tx); + cnt += 1; + } + } + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + assert!(tip_info.burn_block_height <= 220); + } + + eprintln!("\n\nBegin mining\n\n"); + + info!("####################### end of cycle ##############################"); + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + info!("####################### end of cycle ##############################"); + + // prevent Stacks at these heights from propagating + env::set_var( + "STACKS_HIDE_BLOCKS_AT_HEIGHT", + "[226,227,228,229,230,236,237,238,239,240,246,247,248,249,250,256,257,258,259,260,266,267,268,269,270,276,277,278,279,280,286,287,288,289,290]" + ); + + // miner 0 mines a prepare phase and confirms a hidden anchor block. 
+ // miner 1 is disabled for these prepare phases + for i in 0..10 { + eprintln!("\n\nBuild block {}\n\n", i); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + if i >= reward_cycle_len - prepare_phase_len - 2 { + signal_mining_blocked(miner_status[1].clone()); + } + } + signal_mining_ready(miner_status[1].clone()); + + info!("####################### end of cycle ##############################"); + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + info!("####################### end of cycle ##############################"); + + // miner 1 mines a prepare phase and confirms a hidden anchor block. + // miner 0 is disabled for this prepare phase + for i in 0..10 { + eprintln!("\n\nBuild block {}\n\n", i); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + if i >= reward_cycle_len - prepare_phase_len - 2 { + signal_mining_blocked(miner_status[0].clone()); + } + } + signal_mining_ready(miner_status[0].clone()); + + info!("####################### end of cycle ##############################"); + let mut max_stacks_tip = 0; + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + + // miner 1's history overtakes miner 0's. + // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle + // 23 and affirmed cycle 22's anchor block's absence. 
+ max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); + } + info!("####################### end of cycle ##############################"); + + // advance to start of next reward cycle + eprintln!("\n\nBuild final block\n\n"); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + // resume block propagation + env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); + + // wait for all blocks to propagate + eprintln!( + "Wait for all blocks to propagate; stacks tip height is {}", + max_stacks_tip + ); + wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); + + // nodes now agree on stacks affirmation map + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Final tip for miner {}: {:?}", i, &tip_info); + } +} From c522153bf5eb1778b0de8d34777ab3d2b5414cc4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 27 Apr 2023 03:43:12 -0500 Subject: [PATCH 065/158] add epoch gate to claritydb::get_v2_unlock_height() --- clarity/src/vm/database/clarity_db.rs | 8 ++++++-- clarity/src/vm/database/structures.rs | 12 ++++++------ clarity/src/vm/functions/assets.rs | 4 ++-- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 950ccd2910..b90d401fbd 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -763,8 +763,12 @@ impl<'a> ClarityDatabase<'a> { /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn state db - pub fn get_v2_unlock_height(&self) -> u32 { - self.burn_state_db.get_v2_unlock_height() + pub fn get_v2_unlock_height(&mut self) -> u32 { + if self.get_clarity_epoch_version() >= StacksEpochId::Epoch22 { + self.burn_state_db.get_v2_unlock_height() + } else { + u32::MAX + } } /// Get the last-known 
burnchain block height. diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 40472bb350..8dcd6924ae 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -322,7 +322,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { Ok(()) } - pub fn get_available_balance(&self) -> u128 { + pub fn get_available_balance(&mut self) -> u128 { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance.get_available_balance_at_burn_block( @@ -332,7 +332,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ) } - pub fn canonical_balance_repr(&self) -> STXBalance { + pub fn canonical_balance_repr(&mut self) -> STXBalance { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance @@ -340,7 +340,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { .0 } - pub fn has_locked_tokens(&self) -> bool { + pub fn has_locked_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance.has_locked_tokens_at_burn_block( @@ -350,7 +350,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ) } - pub fn has_unlockable_tokens(&self) -> bool { + pub fn has_unlockable_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance.has_unlockable_tokens_at_burn_block( @@ -360,7 +360,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ) } - pub fn can_transfer(&self, amount: u128) -> bool { + pub fn can_transfer(&mut self, amount: u128) -> bool { self.get_available_balance() >= amount } @@ -425,7 +425,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { /// Return true iff `self` represents a snapshot that has a lock /// created by PoX v2. 
- pub fn is_v2_locked(&self) -> bool { + pub fn is_v2_locked(&mut self) -> bool { match self.canonical_balance_repr() { STXBalance::Unlocked { .. } => false, STXBalance::LockedPoxOne { .. } => false, diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index b54c8b1b5f..6906543d2a 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -106,7 +106,7 @@ pub fn special_stx_balance( if let Value::Principal(ref principal) = owner { let balance = { - let snapshot = env + let mut snapshot = env .global_context .database .get_stx_balance_snapshot(principal); @@ -149,7 +149,7 @@ pub fn stx_transfer_consolidated( env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; - let sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from); + let mut sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from); if !sender_snapshot.can_transfer(amount) { return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE); } From 6f5700327f1dd18b71767c46762608a07c84a9a6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 23 Mar 2023 13:34:34 -0500 Subject: [PATCH 066/158] build: use optimizations in packages for dev builds --- Cargo.toml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b930bacb78..4af1002184 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -109,8 +109,16 @@ monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks_common/slog_json", "clarity/slog_json"] testing = [] -[profile.dev.package.regex] -opt-level = 2 +# Use a bit more than default optimization for +# dev builds to speed up test execution +[profile.dev] +opt-level = 1 + +# Use release-level optimization for dependencies +# This slows down "first" builds on development environments, +# but won't impact subsequent builds. 
+[profile.dev.package."*"] +opt-level = 3 [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } From dd71dded7aee8ccd7e3d1d14417348e193c94236 Mon Sep 17 00:00:00 2001 From: Diwaker Gupta <15990+diwakergupta@users.noreply.github.com> Date: Thu, 27 Apr 2023 13:39:49 -0400 Subject: [PATCH 067/158] chore: update CHANGELOG. --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f1edd70e25..1565193d90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.2.0.0.1] + +This is a **consensus-breaking** release to address a bug and DoS vector in `stacks-increase`. +Additional context and rationale can be found in [SIP-022](https://github.com/stacksgov/sips/blob/main/sips/sip-022/sip-022-emergency-pox-fix.md). + +This release is compatible with chainstate directories from 2.1.0.0.x. + ## [2.1.0.0.3] This is a high-priority hotfix release to address a bug in the From 472649ca31b8388b25bba1f8ff18c286daa74db6 Mon Sep 17 00:00:00 2001 From: Diwaker Gupta <15990+diwakergupta@users.noreply.github.com> Date: Thu, 27 Apr 2023 16:12:24 -0400 Subject: [PATCH 068/158] Update CHANGELOG.md Co-authored-by: Brice Dobry --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1565193d90..a1e2c9657a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [2.2.0.0.1] -This is a **consensus-breaking** release to address a bug and DoS vector in `stacks-increase`. 
+This is a **consensus-breaking** release to address a bug and DoS vector in pox-2's `stack-increase` function. Additional context and rationale can be found in [SIP-022](https://github.com/stacksgov/sips/blob/main/sips/sip-022/sip-022-emergency-pox-fix.md). This release is compatible with chainstate directories from 2.1.0.0.x. From e6ffcdcb6173dfe704ded55ea938b18362d4fa3a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 27 Apr 2023 15:29:46 -0500 Subject: [PATCH 069/158] add integration test --- testnet/stacks-node/src/tests/epoch_22.rs | 100 +++++++++++++++++++--- 1 file changed, 88 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3c3ebb7b51..b679c93e2b 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -828,10 +828,20 @@ fn pox_2_unlock_all() { ) .unwrap() .unwrap(); - let tx = make_contract_call( + + let tx = make_contract_publish( &spender_sk, 1, tx_fee, + "unlock-height", + "(define-public (unlock-height (x principal)) (ok (get unlock-height (stx-account x))))", + ); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_sk, + 2, + tx_fee, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), "pox-2", "stack-stx", @@ -867,18 +877,46 @@ fn pox_2_unlock_all() { // that it can mine _at all_ is a success criterion let mut last_block_height = get_chain_info(&conf).burn_block_height; - // advance to 1 block before 2.2 activation + // advance to 2 blocks before 2.2 activation loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_2 - 1 { + if tip_info.burn_block_height >= epoch_2_2 - 2 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } + let tx = make_contract_call( + &spender_sk, + 3, + tx_fee, + &to_addr(&spender_sk), + "unlock-height", + "unlock-height", + &[spender_addr.clone().into()], + ); + + 
submit_tx(&http_origin, &tx); + let nonce_of_2_1_unlock_ht_call = 3; + // this is the last block before 2.2 activates + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + // this block activates 2.2 next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + let tx = make_contract_call( + &spender_sk, + 4, + tx_fee, + &to_addr(&spender_sk), + "unlock-height", + "unlock-height", + &[spender_addr.clone().into()], + ); + + submit_tx(&http_origin, &tx); + let nonce_of_2_2_unlock_ht_call = 4; + // this *burn block* is when the unlock occurs next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); @@ -890,7 +928,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_1_account.balance as u64, - spender_1_initial_balance - stacked - (2 * tx_fee), + spender_1_initial_balance - stacked - (4 * tx_fee), "Spender 1 should still be locked" ); assert_eq!( @@ -898,8 +936,8 @@ fn pox_2_unlock_all() { "Spender 1 should still be locked" ); assert_eq!( - spender_1_account.nonce, 2, - "Spender 1 should have two accepted transactions" + spender_1_account.nonce, 4, + "Spender 1 should have 4 accepted transactions" ); assert_eq!( @@ -928,13 +966,13 @@ fn pox_2_unlock_all() { assert_eq!( spender_1_account.balance, - spender_1_initial_balance as u128 - (2 * tx_fee as u128), + spender_1_initial_balance as u128 - (5 * tx_fee as u128), "Spender 1 should be unlocked" ); assert_eq!(spender_1_account.locked, 0, "Spender 1 should be unlocked"); assert_eq!( - spender_1_account.nonce, 2, - "Spender 1 should have two accepted transactions" + spender_1_account.nonce, 5, + "Spender 1 should have 5 accepted transactions" ); assert_eq!( @@ -949,7 +987,7 @@ fn pox_2_unlock_all() { ); // perform a transfer - let tx = make_stacks_transfer(&spender_sk, 2, tx_fee, &spender_3_addr, 1_000_000); + let tx = make_stacks_transfer(&spender_sk, 5, tx_fee, &spender_3_addr, 1_000_000); info!("Submit stack transfer tx to {:?}", &http_origin); submit_tx(&http_origin, 
&tx); @@ -981,12 +1019,12 @@ fn pox_2_unlock_all() { assert_eq!( spender_1_account.balance, - spender_1_initial_balance as u128 - (3 * tx_fee as u128) - 1_000_000, + spender_1_initial_balance as u128 - (6 * tx_fee as u128) - 1_000_000, "Spender 1 should be unlocked" ); assert_eq!(spender_1_account.locked, 0, "Spender 1 should be unlocked"); assert_eq!( - spender_1_account.nonce, 3, + spender_1_account.nonce, 6, "Spender 1 should have three accepted transactions" ); @@ -1151,6 +1189,9 @@ fn pox_2_unlock_all() { } } + let mut unlock_ht_22_tested = false; + let mut unlock_ht_21_tested = false; + let blocks = test_observer::get_blocks(); for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -1163,9 +1204,44 @@ fn pox_2_unlock_all() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == nonce_of_2_2_unlock_ht_call + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "unlock-height"); + assert_eq!(contract_call.function_name.as_str(), "unlock-height"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); + unlock_ht_22_tested = true; + } + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "unlock-height"); + assert_eq!(contract_call.function_name.as_str(), "unlock-height"); + 
let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), format!("(ok u{})", 225 + 60)); + unlock_ht_21_tested = true; + } } } + assert!(unlock_ht_21_tested); + assert!(unlock_ht_22_tested); + test_observer::clear(); channel.stop_chains_coordinator(); } From 225549ce72c03338afa57003d83ff325084efe69 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 27 Apr 2023 16:37:27 -0500 Subject: [PATCH 070/158] fix test --- testnet/stacks-node/src/tests/epoch_22.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index b679c93e2b..304825e2e0 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::env; use std::thread; +use std::time::Duration; use stacks::burnchains::Burnchain; use stacks::chainstate::stacks::address::PoxAddress; @@ -877,10 +878,10 @@ fn pox_2_unlock_all() { // that it can mine _at all_ is a success criterion let mut last_block_height = get_chain_info(&conf).burn_block_height; - // advance to 2 blocks before 2.2 activation + // advance to 3 blocks before 2.2 activation loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_2 - 2 { + if tip_info.burn_block_height >= epoch_2_2 - 3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -898,10 +899,7 @@ fn pox_2_unlock_all() { submit_tx(&http_origin, &tx); let nonce_of_2_1_unlock_ht_call = 3; - // this is the last block before 2.2 activates - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); - - // this block activates 2.2 + // this will build the last block before 2.2 activates next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); let tx = make_contract_call( @@ -917,9 +915,15 @@ fn 
pox_2_unlock_all() { submit_tx(&http_origin, &tx); let nonce_of_2_2_unlock_ht_call = 4; + // this block activates 2.2 + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + // this *burn block* is when the unlock occurs next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + // and this will wake up the node + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); @@ -928,7 +932,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_1_account.balance as u64, - spender_1_initial_balance - stacked - (4 * tx_fee), + spender_1_initial_balance - stacked - (5 * tx_fee), "Spender 1 should still be locked" ); assert_eq!( @@ -936,7 +940,7 @@ fn pox_2_unlock_all() { "Spender 1 should still be locked" ); assert_eq!( - spender_1_account.nonce, 4, + spender_1_account.nonce, 5, "Spender 1 should have 4 accepted transactions" ); @@ -1233,7 +1237,7 @@ fn pox_2_unlock_all() { tx.get("raw_result").unwrap().as_str().unwrap(), ) .unwrap(); - assert_eq!(result.to_string(), format!("(ok u{})", 225 + 60)); + assert_eq!(result.to_string(), format!("(ok u{})", 230 + 60)); unlock_ht_21_tested = true; } } From 0abe7e82825523dc2983b9c9ab766693a174ce85 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 12:45:56 -0500 Subject: [PATCH 071/158] introduce epoch 2.3 --- clarity/src/vm/analysis/mod.rs | 2 +- clarity/src/vm/analysis/type_checker/mod.rs | 4 +- clarity/src/vm/costs/mod.rs | 4 +- clarity/src/vm/functions/mod.rs | 2 + clarity/src/vm/types/signatures.rs | 8 +- clarity/src/vm/version.rs | 1 + src/chainstate/burn/db/sortdb.rs | 43 +++++++- .../burn/operations/leader_block_commit.rs | 4 +- src/chainstate/coordinator/mod.rs | 6 +- src/chainstate/stacks/db/blocks.rs | 41 ++++++-- src/chainstate/stacks/db/mod.rs | 1 + src/chainstate/stacks/db/transactions.rs | 1 + 
src/clarity_vm/clarity.rs | 27 +++++ src/core/mod.rs | 98 ++++++++++++++++++- src/cost_estimates/pessimistic.rs | 2 + stacks-common/src/types/mod.rs | 3 + testnet/stacks-node/src/neon_node.rs | 4 +- 17 files changed, 227 insertions(+), 24 deletions(-) diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 443fbc1954..d94e2fd1b3 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -137,7 +137,7 @@ pub fn run_analysis( StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) } StacksEpochId::Epoch10 => unreachable!("Epoch 1.0 is not a valid epoch for analysis"), diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index 8286cbdc87..bbcd9270cb 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -50,7 +50,7 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_2_05(accounting, args) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { self.check_args_2_1(accounting, args, clarity_version) } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), @@ -68,7 +68,7 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_by_allowing_trait_cast_2_05(db, func_args) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), diff --git a/clarity/src/vm/costs/mod.rs 
b/clarity/src/vm/costs/mod.rs index e8998d0759..dfb7cd81a6 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -699,7 +699,9 @@ impl LimitedCostTracker { } StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => COSTS_3_NAME.to_string(), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + COSTS_3_NAME.to_string() + } } } } diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 21e6fcdfb2..e2d0f5c6e4 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -58,6 +58,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch21 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.2. StacksEpochId::Epoch22 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.3. + StacksEpochId::Epoch23 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 446a2cb5f2..b189897638 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -529,7 +529,9 @@ impl TypeSignature { pub fn admits_type(&self, epoch: &StacksEpochId, other: &TypeSignature) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => self.admits_type_v2_0(&other), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => self.admits_type_v2_1(other), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + self.admits_type_v2_1(other) + } StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } } @@ -1045,7 +1047,9 @@ impl TypeSignature { ) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => Self::least_supertype_v2_0(a, b), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => Self::least_supertype_v2_1(a, b), + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | 
StacksEpochId::Epoch23 => { + Self::least_supertype_v2_1(a, b) + } StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 3b667fd507..46ad1500aa 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -32,6 +32,7 @@ impl ClarityVersion { StacksEpochId::Epoch2_05 => ClarityVersion::Clarity1, StacksEpochId::Epoch21 => ClarityVersion::Clarity2, StacksEpochId::Epoch22 => ClarityVersion::Clarity2, + StacksEpochId::Epoch23 => ClarityVersion::Clarity2, } } } diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index c0eae3046e..3355191901 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -501,7 +501,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "5"; +pub const SORTITION_DB_VERSION: &'static str = "6"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -707,6 +707,9 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" DELETE FROM epochs;"#]; +const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + // update this to add new indexes const LAST_SORTITION_DB_INDEX: &'static str = "index_delegate_stx_burn_header_hash"; @@ -2663,6 +2666,7 @@ impl SortitionDB { SortitionDB::apply_schema_3(&db_tx)?; SortitionDB::apply_schema_4(&db_tx)?; SortitionDB::apply_schema_5(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2856,12 +2860,24 @@ impl SortitionDB { || version == "3" || version == "4" || version == "5" + || version == "6" } StacksEpochId::Epoch2_05 => { - version == "2" || version == "3" || version == "4" || version == "5" + version == "2" + || version == "3" + || version == "4" + || version == "5" + || version == "6" + } + StacksEpochId::Epoch21 => { + version == "3" || version == "4" || 
version == "5" || version == "6" + } + StacksEpochId::Epoch22 => { + version == "3" || version == "4" || version == "5" || version == "6" + } + StacksEpochId::Epoch23 => { + version == "3" || version == "4" || version == "5" || version == "6" } - StacksEpochId::Epoch21 => version == "3" || version == "4" || version == "5", - StacksEpochId::Epoch22 => version == "3" || version == "4" || version == "5", } } @@ -2948,6 +2964,21 @@ impl SortitionDB { Ok(()) } + fn apply_schema_6(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_6 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["6"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { @@ -2990,6 +3021,10 @@ impl SortitionDB { let tx = self.tx_begin()?; SortitionDB::apply_schema_5(&tx.deref(), epochs)?; tx.commit()?; + } else if version == "5" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_6(&tx.deref(), epochs)?; + tx.commit()?; } else if version == expected_version { return Ok(()); } else { diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 2e9ece5cb3..6f00e186b3 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -39,6 +39,7 @@ use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::codec::{write_next, Error as codec_error, StacksMessageCodec}; use crate::core::STACKS_EPOCH_2_2_MARKER; +use crate::core::STACKS_EPOCH_2_3_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER}; use crate::net::Error as net_error; 
@@ -755,6 +756,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch2_05 => self.check_epoch_commit_marker(STACKS_EPOCH_2_05_MARKER), StacksEpochId::Epoch21 => self.check_epoch_commit_marker(STACKS_EPOCH_2_1_MARKER), StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), + StacksEpochId::Epoch23 => self.check_epoch_commit_marker(STACKS_EPOCH_2_3_MARKER), } } @@ -769,7 +771,7 @@ impl LeaderBlockCommitOp { ) -> Result { let tx_tip = tx.context.chain_tip.clone(); let intended_sortition = match epoch_id { - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index c033254e07..16f8f30c1b 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -2991,8 +2991,10 @@ impl< return Ok(Some(pox_anchor)); } } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { - // 2.1 behavior: the anchor block must also be the + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 => { + // 2.1 and onward behavior: the anchor block must also be the // heaviest-confirmed anchor block by BTC weight, and the highest // such anchor block if there are multiple contenders. 
if let Some(pox_anchor) = diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 512a1cf4e0..440029166f 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -4892,6 +4892,13 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); applied = true; } + StacksEpochId::Epoch23 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } @@ -4906,21 +4913,41 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); applied = true; } + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + _ => { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch21 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch22 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } }, - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch22 => { assert_eq!( sortition_epoch.epoch_id, - StacksEpochId::Epoch22, - "Should only transition from Epoch21 to 
Epoch22" + StacksEpochId::Epoch23, + "Should only transition from Epoch22 to Epoch23" ); - receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); applied = true; } - StacksEpochId::Epoch22 => { - panic!("No defined transition from Epoch22 forward") + StacksEpochId::Epoch23 => { + panic!("No defined transition from Epoch23 forward") } } } @@ -5507,7 +5534,7 @@ impl StacksChainState { // The DelegateStx bitcoin wire format does not exist before Epoch 2.1. Ok((stack_ops, transfer_ops, vec![])) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 => { + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index 00b56892c8..faec4e7da2 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -224,6 +224,7 @@ impl DBConfig { } StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", } } } diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index 17868ddaab..e4461722d8 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -8356,6 +8356,7 @@ pub mod test { StacksEpochId::Epoch2_05 => self.get_stacks_epoch(1), StacksEpochId::Epoch21 => self.get_stacks_epoch(2), StacksEpochId::Epoch22 => self.get_stacks_epoch(3), + StacksEpochId::Epoch23 => self.get_stacks_epoch(4), } } fn get_pox_payout_addrs( diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 071185650b..ac4f562f72 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1109,6 +1109,33 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> 
{ }) } + pub fn initialize_epoch_2_3(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch23; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch23); + Ok(()) + }) + .unwrap(); + + // require 2.2 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch23; + }); + + debug!("Epoch 2.3 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; diff --git a/src/core/mod.rs b/src/core/mod.rs index 5bc8d7753c..75c53de712 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -58,6 +58,7 @@ pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; +pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; // network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; @@ -108,6 +109,8 @@ pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; /// This is Epoch-2.2 activation height proposed in SIP-022 pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; +/// This is Epoch-2.3 activation height proposed in SIP-022 +pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 789_751; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; @@ -229,7 +232,7 
@@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 5] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 6] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -261,10 +264,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch22, start_height: BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_2 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, ]; } @@ -353,6 +363,10 @@ pub static STACKS_EPOCH_2_1_MARKER: u8 = 0x06; /// *or greater*. pub static STACKS_EPOCH_2_2_MARKER: u8 = 0x07; +/// Stacks 2.3 epoch marker. All block-commits in 2.3 must have a memo bitfield with this value +/// *or greater*. 
+pub static STACKS_EPOCH_2_3_MARKER: u8 = 0x08; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); @@ -429,6 +443,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -655,6 +671,83 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_2_3(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_3 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 
210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -715,6 +808,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch2_05 => StacksEpoch::unit_test_2_05(first_burnchain_height), StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1(first_burnchain_height), StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), + StacksEpochId::Epoch23 => StacksEpoch::unit_test_2_3(first_burnchain_height), } } diff --git a/src/cost_estimates/pessimistic.rs b/src/cost_estimates/pessimistic.rs index 4264151160..4fdf109792 100644 --- a/src/cost_estimates/pessimistic.rs +++ b/src/cost_estimates/pessimistic.rs @@ -232,6 +232,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch21 => ":2.1", // reuse cost estimates in Epoch22 StacksEpochId::Epoch22 => ":2.1", + // reuse cost estimates in Epoch23 + StacksEpochId::Epoch23 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index f39691072f..679b100bb4 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -73,6 +73,7 @@ pub enum StacksEpochId { Epoch2_05 = 0x02005, Epoch21 = 0x0200a, Epoch22 = 0x0200f, + Epoch23 = 0x02014, } impl StacksEpochId { @@ -89,6 +90,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch2_05 => write!(f, "2.05"), StacksEpochId::Epoch21 => write!(f, "2.1"), StacksEpochId::Epoch22 => write!(f, "2.2"), + StacksEpochId::Epoch23 => write!(f, "2.3"), } } } @@ -103,6 +105,7 @@ impl TryFrom for StacksEpochId { x if x == 
StacksEpochId::Epoch2_05 as u32 => Ok(StacksEpochId::Epoch2_05), x if x == StacksEpochId::Epoch21 as u32 => Ok(StacksEpochId::Epoch21), x if x == StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), + x if x == StacksEpochId::Epoch23 as u32 => Ok(StacksEpochId::Epoch23), _ => Err("Invalid epoch"), } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 42da5ac074..5ad0ecc586 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -177,7 +177,7 @@ use stacks::chainstate::stacks::{ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::STACKS_EPOCH_2_2_MARKER; +use stacks::core::STACKS_EPOCH_2_3_MARKER; use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -1326,7 +1326,7 @@ impl BlockMinerThread { apparent_sender: sender, key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_2_MARKER], + memo: vec![STACKS_EPOCH_2_3_MARKER], new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, From 71cf5f7c53861d2fa10b51c92fd6b52c5c763889 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 1 May 2023 11:54:41 -0400 Subject: [PATCH 072/158] fix: preserve buggy canonicalize behavior in 2.2 --- clarity/src/vm/types/signatures.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index b189897638..be5149eac0 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -724,8 +724,11 @@ impl TypeSignature { /// types for the specified epoch. 
pub fn canonicalize(&self, epoch: &StacksEpochId) -> TypeSignature { match epoch { - StacksEpochId::Epoch21 => self.canonicalize_v2_1(), - _ => self.clone(), + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch22 => self.clone(), + StacksEpochId::Epoch21 | StacksEpochId::Epoch23 => self.canonicalize_v2_1(), } } From 282f89f19e7b789f4f023f10fc47236ab4c794cb Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 May 2023 11:27:08 -0500 Subject: [PATCH 073/158] test: add coverage for cross-epoch trait invocations for epochs 2.2 and 2.3 --- clarity/src/vm/tests/traits.rs | 1 + clarity/src/vm/types/signatures.rs | 2 + src/chainstate/stacks/boot/contract_tests.rs | 4 +- src/clarity_vm/tests/contracts.rs | 75 +++++++++++++++++++- 4 files changed, 78 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 6e1e7fa2d0..b1ba00b9de 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -37,6 +37,7 @@ use crate::vm::ContractContext; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] fn test_epoch_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} #[apply(test_epoch_clarity_versions)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index be5149eac0..00060c9d56 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -727,7 +727,9 @@ impl TypeSignature { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 + // Epoch-2.2 had a regression in canonicalization, so it must be preserved here. 
| StacksEpochId::Epoch22 => self.clone(), + // Note for future epochs: Epochs >= 2.3 should use the canonicalize_v2_1() routine StacksEpochId::Epoch21 | StacksEpochId::Epoch23 => self.canonicalize_v2_1(), } } diff --git a/src/chainstate/stacks/boot/contract_tests.rs b/src/chainstate/stacks/boot/contract_tests.rs index 5b565f64ae..55be1ff722 100644 --- a/src/chainstate/stacks/boot/contract_tests.rs +++ b/src/chainstate/stacks/boot/contract_tests.rs @@ -97,7 +97,7 @@ lazy_static! { pub struct ClarityTestSim { marf: MarfedKV, - height: u64, + pub height: u64, fork: u64, /// This vec specifies the transitions for each epoch. /// It is a list of heights at which the simulated chain transitions @@ -379,6 +379,8 @@ impl BurnStateDB for TestSimBurnStateDB { 0 => StacksEpochId::Epoch20, 1 => StacksEpochId::Epoch2_05, 2 => StacksEpochId::Epoch21, + 3 => StacksEpochId::Epoch22, + 4 => StacksEpochId::Epoch23, _ => panic!("Epoch unknown"), }; diff --git a/src/clarity_vm/tests/contracts.rs b/src/clarity_vm/tests/contracts.rs index 2f2cd2cf96..6e217ece65 100644 --- a/src/clarity_vm/tests/contracts.rs +++ b/src/clarity_vm/tests/contracts.rs @@ -400,7 +400,7 @@ fn trait_invocation_205_with_stored_principal() { #[test] fn trait_invocation_cross_epoch() { let mut sim = ClarityTestSim::new(); - sim.epoch_bounds = vec![0, 3, 5]; + sim.epoch_bounds = vec![0, 3, 5, 7, 9]; // Advance two blocks so we get to Stacks 2.05. sim.execute_next_block(|_env| {}); @@ -426,6 +426,7 @@ fn trait_invocation_cross_epoch() { let sender = StacksAddress::burn_address(false).into(); + info!("Sim height = {}", sim.height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); let clarity_version = ClarityVersion::default_for_epoch(epoch); @@ -434,6 +435,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &use_contract_id, use_contract, clarity_version).unwrap(); }); // Advance another block so we get to Stacks 2.1. 
This is the last block in 2.05 + info!("Sim height = {}", sim.height); sim.execute_next_block(|_| {}); // now in Stacks 2.1 sim.execute_next_block_as_conn(|conn| { @@ -443,6 +445,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &invoke_contract_id, invoke_contract, clarity_version).unwrap(); }); + info!("Sim height = {}", sim.height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -459,16 +462,82 @@ fn trait_invocation_cross_epoch() { }); }); + info!("Sim height = {}", sim.height); + // now in Stacks 2.2 sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { - clarity_db + let error = clarity_db .run_contract_call( &sender, None, &invoke_contract_id, "invocation-2", - &[Value::Principal(impl_contract_id.into())], + &[Value::Principal(impl_contract_id.clone().into())], + |_, _| false, + ) + .unwrap_err(); + + if let ClarityError::Interpreter(Error::Unchecked(CheckErrors::TypeValueError(TypeSignature::TraitReferenceType(_), value))) = error { + // pass + } else { + panic!("Expected an Interpreter(UncheckedError(TypeValue(TraitReferenceType, Principal))) during Epoch-2.2"); + }; + }); + }); + + info!("Sim height = {}", sim.height); + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + let error = clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + |_, _| false, + ) + .unwrap_err(); + + if let ClarityError::Interpreter(Error::Unchecked(CheckErrors::TypeValueError(TypeSignature::TraitReferenceType(_), value))) = error { + // pass + } else { + panic!("Expected an Interpreter(UncheckedError(TypeValue(TraitReferenceType, Principal))) during Epoch-2.2"); + }; + }); + }); + + // should now be in Stacks 2.3, so the invocation should work again! 
+ info!("Sim height = {}", sim.height); + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-1", + &[], + |_, _| false, + ) + .unwrap(); + }); + }); + + info!("Sim height = {}", sim.height); + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-1", + &[], |_, _| false, ) .unwrap(); From ba247b59f1bed80b7ad300bf16b185e519198498 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 May 2023 13:30:17 -0500 Subject: [PATCH 074/158] add more test rstest cases, update EpochId::latest() --- clarity/src/vm/analysis/trait_checker/tests.rs | 4 ++++ clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs | 4 ++++ clarity/src/vm/types/signatures.rs | 4 ++++ stacks-common/src/types/mod.rs | 2 +- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index d0bffe2c10..42f42777c0 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -34,6 +34,10 @@ use stacks_common::types::StacksEpochId; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_trait_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs 
index 8fdfefc6b6..cdbdf8d85f 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -58,6 +58,10 @@ pub mod contracts; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_type_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 00060c9d56..b2ec129a76 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1941,6 +1941,10 @@ mod test { #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] fn test_clarity_versions_signatures( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 679b100bb4..f71cd7a475 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -78,7 +78,7 @@ pub enum StacksEpochId { impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch22 + StacksEpochId::Epoch23 } } From fd9fece715c2516404405091cbc847c74f44139c Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 May 2023 14:16:11 -0500 Subject: [PATCH 075/158] test: both invocation-1 and invocation-2 
--- src/clarity_vm/tests/contracts.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/clarity_vm/tests/contracts.rs b/src/clarity_vm/tests/contracts.rs index 6e217ece65..1e366346ce 100644 --- a/src/clarity_vm/tests/contracts.rs +++ b/src/clarity_vm/tests/contracts.rs @@ -397,6 +397,7 @@ fn trait_invocation_205_with_stored_principal() { } /// Publish a trait in epoch 2.05 and then invoke it in epoch 2.1. +/// Test the behaviors in 2.2 and 2.3 as well. #[test] fn trait_invocation_cross_epoch() { let mut sim = ClarityTestSim::new(); @@ -536,8 +537,8 @@ fn trait_invocation_cross_epoch() { &sender, None, &invoke_contract_id, - "invocation-1", - &[], + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], |_, _| false, ) .unwrap(); From c28183520757b54618001f9b88a85df10789c46f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 May 2023 15:03:06 -0500 Subject: [PATCH 076/158] bump block version --- src/chainstate/stacks/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chainstate/stacks/mod.rs b/src/chainstate/stacks/mod.rs index e10460930e..4ed4169d4a 100644 --- a/src/chainstate/stacks/mod.rs +++ b/src/chainstate/stacks/mod.rs @@ -84,7 +84,7 @@ pub use stacks_common::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; -pub const STACKS_BLOCK_VERSION: u8 = 5; +pub const STACKS_BLOCK_VERSION: u8 = 6; pub const STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE: u8 = 1; pub const MAX_BLOCK_LEN: u32 = 2 * 1024 * 1024; From 7a14537b4bd2710d110703157c48699130f0aa55 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 1 May 2023 17:18:32 -0500 Subject: [PATCH 077/158] test: add epoch_23::trait_invocation_behavior --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/epoch_23.rs | 626 ++++++++++++++++++++++ testnet/stacks-node/src/tests/mod.rs | 1 + 3 files changed, 628 insertions(+) create mode 100644 testnet/stacks-node/src/tests/epoch_23.rs 
diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 66adabc451..212ded7b76 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -101,6 +101,7 @@ jobs: - tests::epoch_22::pox_2_unlock_all - tests::epoch_22::disable_pox - tests::epoch_22::test_pox_reorg_one_flap + - tests::epoch_23::trait_invocation_behavior - tests::neon_integrations::bad_microblock_pubkey steps: - uses: actions/checkout@v2 diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs new file mode 100644 index 0000000000..1db3f922d8 --- /dev/null +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -0,0 +1,626 @@ +use std::collections::HashMap; +use std::env; +use std::thread; + +use stacks::burnchains::Burnchain; +use stacks::core::PEER_VERSION_EPOCH_2_2; +use stacks::core::PEER_VERSION_EPOCH_2_3; +use stacks::core::STACKS_EPOCH_MAX; +use stacks::vm::types::QualifiedContractIdentifier; + +use crate::config::EventKeyType; +use crate::config::EventObserverConfig; +use crate::config::InitialBalance; +use crate::neon; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::neon_integrations::*; +use crate::tests::*; +use crate::BitcoinRegtestController; +use crate::BurnchainController; +use stacks::core; + +use stacks::burnchains::PoxConstants; + +use clarity::vm::types::PrincipalData; + +#[test] +#[ignore] +/// Test the trait invocation behavior for contracts instantiated in epoch 2.05 +/// * in epoch 2.1: the trait invocation works +/// * in epoch 2.2: trait invocation is broken, and returns a runtime error, even when wrapped +/// * in epoch 2.3: the trait invocation works +fn trait_invocation_behavior() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 235; + let epoch_2_3 = 241; + + let 
spender_sk = StacksPrivateKey::new(); + let contract_addr = to_addr(&spender_sk); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let impl_contract_id = + QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + + let mut spender_nonce = 0; + let fee_amount = 10_000; + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1_000_000, + }); + + let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; + let impl_contract = + "(impl-trait .simple-trait.simple-method) (define-read-only (foo (x uint)) (ok x))"; + let use_contract = "(use-trait simple .simple-trait.simple-method) + (define-public (call-simple (s )) (contract-call? s foo u0))"; + let invoke_contract = " + (use-trait simple .simple-trait.simple-method) + (define-public (invocation-1) + (contract-call? .use-simple call-simple .impl-simple)) + (define-public (invocation-2 (st )) + (contract-call? .use-simple call-simple st)) + "; + + let wrapper_contract = " + (use-trait simple .simple-trait.simple-method) + (define-public (invocation-1) + (contract-call? .invoke-simple invocation-1)) + (define-public (invocation-2 (st )) + (contract-call? 
.invoke-simple invocation-2 st)) + "; + + let (mut conf, _) = neon_integration_test_conf(); + + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: epoch_2_2, + end_height: epoch_2_3, + block_limit: epochs[3].block_limit.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }); + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: epoch_2_3, + end_height: STACKS_EPOCH_MAX, + block_limit: epochs[3].block_limit.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); 
+ btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // publish contracts right away! 
+ let publish_trait = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "simple-trait", + trait_contract, + ); + + spender_nonce += 1; + + let publish_impl = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "impl-simple", + impl_contract, + ); + + spender_nonce += 1; + + let publish_use = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "use-simple", + use_contract, + ); + + spender_nonce += 1; + + let publish_invoke = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "invoke-simple", + invoke_contract, + ); + + spender_nonce += 1; + + info!("Submit 2.05 txs"); + submit_tx(&http_origin, &publish_trait); + submit_tx(&http_origin, &publish_impl); + submit_tx(&http_origin, &publish_use); + submit_tx(&http_origin, &publish_invoke); + + info!( + "At height = {}, epoch-2.1 = {}", + get_chain_info(&conf).burn_block_height, + epoch_2_1 + ); + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_205_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_205_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_1 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_1 - 1, so these are the last transactions processed pre-2.1. 
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_21_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_21_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines those transactions into epoch 2.1 + // mine until just before epoch 2.2 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_2 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_21_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_21_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_2 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_2 - 1, so these are the last transactions processed pre-2.2. 
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let publish_invoke = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "wrap-simple", + wrapper_contract, + ); + + spender_nonce += 1; + submit_tx(&http_origin, &publish_invoke); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_bad_22_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_bad_22_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines those transactions into epoch 2.2 + // mine until just before epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs in epoch 2.2. 
+ let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_bad_22_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_bad_22_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_3 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_3 - 1, so these are the last transactions processed pre-2.3. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let tx_3 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_good_23_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_4 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_23_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_3); + submit_tx(&http_origin, &tx_4); + + // advance to epoch_2_3 before submitting the next transactions, + // so that they can pass the mempool. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. 
+ let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_23_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_23_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Total spender txs = {}", spender_nonce); + + let blocks = test_observer::get_blocks(); + + let mut transaction_receipts = Vec::new(); + + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + // only interested in contract calls + _ => continue, + }; + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + + transaction_receipts.push(( + parsed.auth.get_origin_nonce(), + (contract_call.clone(), result), + )); + } + } + } + + transaction_receipts.sort_by_key(|x| x.0); + + let transaction_receipts: HashMap<_, _> = transaction_receipts.into_iter().collect(); + + for tx_nonce in [ + expected_good_205_1_nonce, + expected_good_21_1_nonce, + expected_good_21_3_nonce, + expected_good_23_1_nonce, + ] { 
+ assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "invoke-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [ + expected_good_205_2_nonce, + expected_good_21_2_nonce, + expected_good_21_4_nonce, + expected_good_23_2_nonce, + ] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "invoke-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_good_23_3_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_good_23_4_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(err none)"); + } + + for tx_nonce in [expected_bad_22_2_nonce, expected_bad_22_4_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(err none)"); 
+ } + + for (key, value) in transaction_receipts.iter() { + eprintln!("{} => {} of {}", key, value.0, value.1); + } + + test_observer::clear(); + channel.stop_chains_coordinator(); +} diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 766123b6f1..8eca8f21fc 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -43,6 +43,7 @@ mod bitcoin_regtest; mod epoch_205; mod epoch_21; mod epoch_22; +mod epoch_23; mod integrations; mod mempool; pub mod neon_integrations; From c6e9d35827a68ee0942e5c626fafcdc9b6260877 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 08:30:03 -0500 Subject: [PATCH 078/158] set 2.3 activation height to reflect SIP-023, set testnet activation height --- src/core/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 75c53de712..5eed1f5ab6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -110,7 +110,7 @@ pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; /// This is Epoch-2.2 activation height proposed in SIP-022 pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; /// This is Epoch-2.3 activation height proposed in SIP-022 -pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 789_751; +pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_287; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; @@ -119,6 +119,7 @@ pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; +pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_648; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -311,10 +312,17 @@ 
lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch22, start_height: BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_2 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, ]; } From f1b7d1a5aa2124fb56ea08d5cd32a05feebff4a2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 08:40:48 -0500 Subject: [PATCH 079/158] updated testnet height + expand vm::tests::traits --- clarity/src/vm/tests/traits.rs | 3 +++ src/core/mod.rs | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index b1ba00b9de..0ee695c837 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -37,7 +37,10 @@ use crate::vm::ContractContext; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_epoch_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} #[apply(test_epoch_clarity_versions)] diff --git a/src/core/mod.rs b/src/core/mod.rs index 5eed1f5ab6..c4ae2e26c0 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -119,7 +119,7 @@ pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const 
BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; -pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_648; +pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -280,7 +280,7 @@ lazy_static! { } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 5] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 6] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, From 9b8611ecec51767217126b1e993db3533e8e7106 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 08:47:31 -0500 Subject: [PATCH 080/158] chore: add copyright comment to epoch_23.rs --- testnet/stacks-node/src/tests/epoch_23.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 1db3f922d8..84072c9a0f 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -1,3 +1,18 @@ +// Copyright (C) 2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ use std::collections::HashMap; use std::env; use std::thread; From 13f0d280ce416219740a850fc0a6c5cf37341e80 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 09:46:03 -0500 Subject: [PATCH 081/158] correct comment text and loosen request time bound in atlas test --- src/core/mod.rs | 2 +- .../stacks-node/src/tests/neon_integrations.rs | 16 ++++++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index c4ae2e26c0..9a273dc337 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -109,7 +109,7 @@ pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; /// This is Epoch-2.2 activation height proposed in SIP-022 pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; -/// This is Epoch-2.3 activation height proposed in SIP-022 +/// This is Epoch-2.3 activation height proposed in SIP-023 pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_287; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5456505a89..34f2da930d 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -7711,6 +7711,8 @@ fn atlas_stress_integration_test() { } eprintln!("attachment_indexes = {:?}", &attachment_indexes); + let max_request_time_ms = 100; + for (ibh, attachments) in attachment_indexes.iter() { let l = attachments.len(); for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) { @@ -7754,10 +7756,11 @@ fn atlas_stress_integration_test() { // requests should take no more than 20ms assert!( - total_time < attempts * 50, - "Atlas inventory request is too slow: {} >= {} * 50", + total_time < attempts * max_request_time_ms, + "Atlas inventory request is too slow: {} >= {} * {}", total_time, - attempts + attempts, + 
max_request_time_ms ); } @@ -7795,10 +7798,11 @@ fn atlas_stress_integration_test() { // requests should take no more than 40ms assert!( - total_time < attempts * 50, - "Atlas chunk request is too slow: {} >= {} * 50", + total_time < attempts * max_request_time_ms, + "Atlas chunk request is too slow: {} >= {} * {}", total_time, - attempts + attempts, + max_request_time_ms ); } } From 0c23a836058219013a917eace4c1e7526aeb76b4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 10:01:17 -0500 Subject: [PATCH 082/158] update mainnet 2.3 activation height with latest from SIP --- src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 9a273dc337..3e78dee91e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -110,7 +110,7 @@ pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; /// This is Epoch-2.2 activation height proposed in SIP-022 pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; /// This is Epoch-2.3 activation height proposed in SIP-023 -pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_287; +pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From 754146c861fb6c6c5cb4d7537e3ce8ccf7144a25 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 2 May 2023 14:28:05 -0700 Subject: [PATCH 083/158] chore - update changelog for 2.3.0.0.0 --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1e2c9657a..509841d15f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [2.3.0.0.0] + +This is a **consensus-breaking** release to address a Clarity VM bug discovered in 2.2.0.0.1. +Tx and read-only calls to functions with traits as parameters are rejected with unchecked TypeValueError. +Additional context and rationale can be found in [SIP-023](https://github.com/stacksgov/sips/blob/main/sips/sip-023/sip-023-emergency-fix-traits.md). + +This release is compatible with chainstate directories from 2.1.0.0.x. + ## [2.2.0.0.1] This is a **consensus-breaking** release to address a bug and DoS vector in pox-2's `stack-increase` function. From 4001fdb9d6618db1e90d084fbb5c041e9a9dc000 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 5 May 2023 09:57:31 -0400 Subject: [PATCH 084/158] Fix merge failures Signed-off-by: Jacinta Ferrant --- src/chainstate/burn/db/sortdb.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 0e89bad380..c1210509b1 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -818,7 +818,7 @@ const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" // update this to add new indexes const LAST_SORTITION_DB_INDEX: &'static str = "index_peg_out_fulfill_burn_header_hash "; -const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[ +const SORTITION_DB_SCHEMA_7: &'static [&'static str] = &[ r#" CREATE TABLE peg_in ( txid TEXT NOT NULL, @@ -3151,7 +3151,7 @@ impl SortitionDB { } fn apply_schema_7(tx: &DBTx) -> Result<(), db_error> { - for sql_exec in SORTITION_DB_SCHEMA_6 { + for sql_exec in SORTITION_DB_SCHEMA_7 { tx.execute_batch(sql_exec)?; } From 2f36efad42dd15d72f28b886ea84b34c11bdffc0 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 15:47:36 -0500 Subject: [PATCH 085/158] fix: update peer versions to latest --- src/core/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 3e78dee91e..53cbc7e1cc 100644 
--- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -50,8 +50,8 @@ pub use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH // fourth byte == highest epoch supported by this node // - 0x05 for 2.05 // - 0x06 for 2.1 -pub const PEER_VERSION_MAINNET: u32 = 0x18000007; -pub const PEER_VERSION_TESTNET: u32 = 0xfacade07; +pub const PEER_VERSION_MAINNET: u32 = 0x18000008; +pub const PEER_VERSION_TESTNET: u32 = 0xfacade08; pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; From 231491a288989340a3e19346943462f571eaa3bf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 15:49:01 -0500 Subject: [PATCH 086/158] chore: update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 509841d15f..94ea9ea592 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.3.0.0.1] + +This is a hotfix release to update the peer version identifier used by the stacks-node p2p network. + +This release is compatible with chainstate directories from 2.3.0.0.x and 2.1.0.0.x + ## [2.3.0.0.0] This is a **consensus-breaking** release to address a Clarity VM bug discovered in 2.2.0.0.1. 
From adc87a6d8a94760d8e177f60ff109319b39e9147 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 16:04:02 -0500 Subject: [PATCH 087/158] chore: be permissive in 2.2/2.3 boundary on peer versions --- src/net/chat.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/net/chat.rs b/src/net/chat.rs index 88337f40b7..f46db51fba 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -712,6 +712,17 @@ impl ConversationP2P { return true; } + // be a little more permissive with epochs 2.3 and 2.2, because 2.3.0.0.0 shipped with + // PEER_VERSION_MAINNET = 0x18000007 and PEER_VERSION_TESTNET = 0xfacade07 + if cur_epoch == PEER_VERSION_EPOCH_2_3 && remote_epoch == PEER_VERSION_EPOCH_2_2 { + debug!( + "Remote peer has epoch {} and current epoch is {}, but we're permissive about 2.2/2.3 boundary", + remote_epoch, + cur_epoch + ); + return true; + } + return false; } From fbffd9e3786dbb3d2128658a435a1945ddd47c42 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 16:18:12 -0500 Subject: [PATCH 088/158] imports --- src/net/chat.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/net/chat.rs b/src/net/chat.rs index f46db51fba..365faa389a 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -35,6 +35,8 @@ use crate::chainstate::burn::db::sortdb; use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::StacksPublicKey; +use crate::core::PEER_VERSION_EPOCH_2_2; +use crate::core::PEER_VERSION_EPOCH_2_3; use crate::monitoring; use crate::net::asn::ASEntry4; use crate::net::codec::*; From 698434802e1381c3fd14b93586b6b5d2bbb4dbf5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 6 May 2023 00:46:19 -0500 Subject: [PATCH 089/158] fix: yield interpreter errors in deser_hex --- clarity/src/vm/costs/mod.rs | 6 ++-- clarity/src/vm/database/clarity_db.rs | 22 ++++++++---- clarity/src/vm/database/key_value_wrapper.rs | 35 
+++++++++++--------- 3 files changed, 38 insertions(+), 25 deletions(-) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index dfb7cd81a6..cc5296909a 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -329,8 +329,9 @@ fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result "vm-costs::last-processed-at-height", &TypeSignature::UIntType, ) { - Some(v) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), - None => return Ok(CostStateSummary::empty()), + Ok(Some(v)) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), + Ok(None) => return Ok(CostStateSummary::empty()), + Err(e) => return Err(CostErrors::CostComputationFailed(e.to_string())), }; let metadata_result = clarity_db @@ -388,6 +389,7 @@ fn load_cost_functions( ) -> Result { let last_processed_count = clarity_db .get_value("vm-costs::last_processed_count", &TypeSignature::UIntType) + .map_err(|_e| CostErrors::CostContractLoadFailure)? 
.map(|result| result.value) .unwrap_or(Value::UInt(0)) .expect_u128(); diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index b90d401fbd..3eac1510cf 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -34,6 +34,7 @@ use crate::vm::errors::{ RuntimeErrorType, }; use crate::vm::representations::ClarityName; +use crate::vm::types::serialization::SerializationError; use crate::vm::types::{ serialization::NONE_SERIALIZATION_LEN, OptionalData, PrincipalData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, @@ -472,8 +473,14 @@ impl<'a> ClarityDatabase<'a> { self.store.get::(key) } - pub fn get_value(&mut self, key: &str, expected: &TypeSignature) -> Option { - self.store.get_value(key, expected) + pub fn get_value( + &mut self, + key: &str, + expected: &TypeSignature, + ) -> Result> { + self.store + .get_value(key, expected) + .map_err(|e| InterpreterError::DBError(e.to_string()).into()) } pub fn get_with_proof(&mut self, key: &str) -> Option<(T, Vec)> @@ -696,6 +703,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::ustx_liquid_supply_key(), &TypeSignature::UIntType, ) + .expect("FATAL: failed to load ustx_liquid_supply Clarity key") .map(|v| v.value.expect_u128()) .unwrap_or(0) } @@ -1152,7 +1160,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type); + let result = self.get_value(&key, &variable_descriptor.value_type)?; match result { None => Ok(Value::none()), @@ -1174,7 +1182,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type); + let result = self.get_value(&key, &variable_descriptor.value_type)?; match result { None => Ok(ValueResult { @@ -1275,7 +1283,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::make_key_for_data_map_entry(contract_identifier, map_name, key_value); let stored_type = 
TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type); + let result = self.get_value(&key, &stored_type)?; match result { None => Ok(Value::none()), @@ -1309,7 +1317,7 @@ impl<'a> ClarityDatabase<'a> { ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type); + let result = self.get_value(&key, &stored_type)?; match result { None => Ok(ValueResult { @@ -1389,7 +1397,7 @@ impl<'a> ClarityDatabase<'a> { } fn data_map_entry_exists(&mut self, key: &str, expected_value: &TypeSignature) -> Result { - match self.get_value(key, expected_value) { + match self.get_value(key, expected_value)? { None => Ok(false), Some(value) => Ok(value.value != Value::none()), } diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index bf26670fd4..9c7aa33569 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -19,6 +19,7 @@ use std::{clone::Clone, cmp::Eq, hash::Hash}; use crate::vm::database::clarity_store::make_contract_hash_key; use crate::vm::errors::InterpreterResult as Result; +use crate::vm::types::serialization::SerializationError; use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::Value; use stacks_common::util::hash::Sha512Trunc256Sum; @@ -367,29 +368,31 @@ impl<'a> RollbackWrapper<'a> { /// Get a Clarity value from the underlying Clarity KV store. /// Returns Some if found, with the Clarity Value and the serialized byte length of the value. 
- pub fn get_value(&mut self, key: &str, expected: &TypeSignature) -> Option { + pub fn get_value( + &mut self, + key: &str, + expected: &TypeSignature, + ) -> std::result::Result, SerializationError> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); - let lookup_result = if self.query_pending_data { - self.lookup_map - .get(key) - .and_then(|x| x.last()) - .map(|x| ValueResult { - value: Value::deserialize(x, expected), + if self.query_pending_data { + if let Some(x) = self.lookup_map.get(key).and_then(|x| x.last()) { + return Ok(Some(ValueResult { + value: Value::try_deserialize_hex(x, expected)?, serialized_byte_len: x.len() as u64 / 2, - }) - } else { - None - }; + })); + } + } - lookup_result.or_else(|| { - self.store.get(key).map(|x| ValueResult { - value: Value::deserialize(&x, expected), + match self.store.get(key) { + Some(x) => Ok(Some(ValueResult { + value: Value::try_deserialize_hex(&x, expected)?, serialized_byte_len: x.len() as u64 / 2, - }) - }) + })), + None => Ok(None), + } } /// This is the height we are currently constructing. It comes from the MARF. From 0ce213b90376f1a93de8c8415e6ef08a6b5c6599 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 8 May 2023 19:27:07 +0000 Subject: [PATCH 090/158] chore: updating changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94ea9ea592..d6ab837573 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [2.3.0.0.1] -This is a hotfix release to update the peer version identifier used by the stacks-node p2p network. +This is a hotfix release to update: +- peer version identifier used by the stacks-node p2p network. 
+- yield interpreter errors in deser_hex This release is compatible with chainstate directories from 2.3.0.0.x and 2.1.0.0.x From baad222e3d3aa5b0ce582156a6e0cbbf21b8f5e6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 9 May 2023 14:05:16 -0500 Subject: [PATCH 091/158] fix: yield too large supertype from checks --- CHANGELOG.md | 7 +++++++ clarity/src/vm/analysis/errors.rs | 2 ++ clarity/src/vm/types/signatures.rs | 14 ++++++++------ src/chainstate/stacks/db/transactions.rs | 9 +++++++++ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6ab837573..368ff525c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.3.0.0.2] + +This is a high-priority hotfix release to address a bug in the +stacks-node miner logic which could impact miner availability. + +This release is compatible with chainstate directories from 2.3.0.0.x and 2.1.0.0.x + ## [2.3.0.0.1] This is a hotfix release to update: diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index ffcf8c27c7..76895c1325 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -35,6 +35,7 @@ pub enum CheckErrors { ValueOutOfBounds, TypeSignatureTooDeep, ExpectedName, + SupertypeTooLarge, // match errors BadMatchOptionSyntax(Box), @@ -320,6 +321,7 @@ impl DiagnosableError for CheckErrors { fn message(&self) -> String { match &self { CheckErrors::ExpectedLiteral => "expected a literal argument".into(), + CheckErrors::SupertypeTooLarge => "supertype of two types is too large".into(), CheckErrors::BadMatchOptionSyntax(source) => format!("match on a optional type uses the following syntax: (match input some-name if-some-expression if-none-expression). 
Caused by: {}", source.message()), diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index b2ec129a76..48b13a320f 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1073,8 +1073,9 @@ impl TypeSignature { let entry_out = Self::least_supertype_v2_0(entry_a, entry_b)?; type_map_out.insert(name.clone(), entry_out); } - Ok(TupleTypeSignature::try_from(type_map_out).map(|x| x.into()) - .expect("ERR: least_supertype_v2_0 attempted to construct a too-large supertype of two types")) + Ok(TupleTypeSignature::try_from(type_map_out) + .map(|x| x.into()) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } ( SequenceType(SequenceSubtype::ListType(ListTypeData { @@ -1095,7 +1096,7 @@ impl TypeSignature { }; let max_len = cmp::max(len_a, len_b); Ok(Self::list_of(entry_type, *max_len) - .expect("ERR: least_supertype_v2_0 attempted to construct a too-large supertype of two types")) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } (ResponseType(resp_a), ResponseType(resp_b)) => { let ok_type = @@ -1174,8 +1175,9 @@ impl TypeSignature { let entry_out = Self::least_supertype_v2_1(entry_a, entry_b)?; type_map_out.insert(name.clone(), entry_out); } - Ok(TupleTypeSignature::try_from(type_map_out).map(|x| x.into()) - .expect("ERR: least_supertype_v2_1 attempted to construct a too-large supertype of two types")) + Ok(TupleTypeSignature::try_from(type_map_out) + .map(|x| x.into()) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } ( SequenceType(SequenceSubtype::ListType(ListTypeData { @@ -1196,7 +1198,7 @@ impl TypeSignature { }; let max_len = cmp::max(len_a, len_b); Ok(Self::list_of(entry_type, *max_len) - .expect("ERR: least_supertype_v2_1 attempted to construct a too-large supertype of two types")) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) 
} (ResponseType(resp_a), ResponseType(resp_b)) => { let ok_type = diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index e4461722d8..596ff4f035 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -329,6 +329,9 @@ pub fn handle_clarity_runtime_error(error: clarity_error) -> ClarityRuntimeTxErr err_type: "short return/panic", } } + clarity_error::Interpreter(InterpreterError::Unchecked(CheckErrors::SupertypeTooLarge)) => { + ClarityRuntimeTxError::Rejectable(error) + } clarity_error::Interpreter(InterpreterError::Unchecked(check_error)) => { ClarityRuntimeTxError::AnalysisError(check_error) } @@ -1115,6 +1118,12 @@ impl StacksChainState { } } } + if let clarity_error::Analysis(err) = &other_error { + if let CheckErrors::SupertypeTooLarge = err.err { + info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); + return Err(Error::ClarityError(other_error)); + } + } // this analysis isn't free -- convert to runtime error let mut analysis_cost = clarity_tx.cost_so_far(); analysis_cost From 0f4fae33261b83f4a84623153c7daab9aab29a41 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 12:45:56 -0500 Subject: [PATCH 092/158] introduce epoch 2.4 --- src/core/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/core/mod.rs b/src/core/mod.rs index 53cbc7e1cc..5e738ed9c1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -111,6 +111,8 @@ pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; /// This is Epoch-2.3 activation height proposed in SIP-023 pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; +/// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-022 +pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 789_751; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const 
BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From 4e606b4c45a5289e01fd6857cb7b8b942c25a8a8 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 26 Apr 2023 13:48:52 -0500 Subject: [PATCH 093/158] add STXBalance::LockedPoxThree variant --- clarity/src/vm/database/structures.rs | 148 ++++++++++++++++++++------ 1 file changed, 117 insertions(+), 31 deletions(-) diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 8dcd6924ae..601500d372 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -139,6 +139,11 @@ pub enum STXBalance { amount_locked: u128, unlock_height: u64, }, + LockedPoxThree { + amount_unlocked: u128, + amount_locked: u128, + unlock_height: u64, + }, } /// Lifetime-limited handle to an uncommitted balance structure. @@ -200,6 +205,24 @@ impl ClaritySerializable for STXBalance { .write_all(&unlock_height.to_be_bytes()) .expect("STXBalance serialization: failed writing unlock_height."); } + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height, + } => { + buffer + .write_all(&[STXBalance::pox_3_version]) + .expect("STXBalance serialization: failed to write PoX version byte"); + buffer + .write_all(&amount_unlocked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_unlocked."); + buffer + .write_all(&amount_locked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_locked."); + buffer + .write_all(&unlock_height.to_be_bytes()) + .expect("STXBalance serialization: failed writing unlock_height."); + } } to_hex(buffer.as_slice()) } @@ -236,9 +259,9 @@ impl ClarityDeserializable for STXBalance { unlock_height, } } - } else if bytes.len() == STXBalance::v2_size { + } else if bytes.len() == STXBalance::v2_and_v3_size { let version = &bytes[0]; - if version != &STXBalance::pox_2_version { + if version != &STXBalance::pox_2_version || version != &STXBalance::pox_3_version { panic!( "Bad 
version byte in STX Balance serialization = {}", version @@ -264,12 +287,20 @@ impl ClarityDeserializable for STXBalance { STXBalance::Unlocked { amount: amount_unlocked, } - } else { + } else if version == &STXBalance::pox_2_version { STXBalance::LockedPoxTwo { amount_unlocked, amount_locked, unlock_height, } + } else if version == &STXBalance::pox_3_version { + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height, + } + } else { + unreachable!("Version is checked for pox_3 or pox_2 version compliance above"); } } else { panic!("Bad STX Balance serialization size = {}", bytes.len()); @@ -427,9 +458,8 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { /// created by PoX v2. pub fn is_v2_locked(&mut self) -> bool { match self.canonical_balance_repr() { - STXBalance::Unlocked { .. } => false, - STXBalance::LockedPoxOne { .. } => false, STXBalance::LockedPoxTwo { .. } => true, + _ => false, } } @@ -559,6 +589,15 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked, unlock_height: new_unlock_height, }, + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + .. + } => STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height: new_unlock_height, + }, }; } @@ -578,8 +617,9 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { // NOTE: do _not_ add mutation methods to this struct. Put them in STXBalanceSnapshot! impl STXBalance { pub const unlocked_and_v1_size: usize = 40; - pub const v2_size: usize = 41; + pub const v2_and_v3_size: usize = 41; pub const pox_2_version: u8 = 0; + pub const pox_3_version: u8 = 1; pub fn zero() -> STXBalance { STXBalance::Unlocked { amount: 0 } @@ -595,12 +635,13 @@ impl STXBalance { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } - | STXBalance::LockedPoxTwo { unlock_height, .. } => *unlock_height, + | STXBalance::LockedPoxTwo { unlock_height, .. } + | STXBalance::LockedPoxThree { unlock_height, .. 
} => *unlock_height, } } /// This method returns the datastructure's lazy view of the unlock_height - /// *while* factoring in the PoX 2 early unlock for PoX 1. + /// *while* factoring in the PoX 2 early unlock for PoX 1 and PoX 3 early unlock for PoX 2. /// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32) -> u64 { @@ -620,6 +661,7 @@ impl STXBalance { *unlock_height } } + STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, } } @@ -629,7 +671,8 @@ impl STXBalance { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { amount_locked, .. } - | STXBalance::LockedPoxTwo { amount_locked, .. } => *amount_locked, + | STXBalance::LockedPoxTwo { amount_locked, .. } + | STXBalance::LockedPoxThree { amount_locked, .. } => *amount_locked, } } @@ -645,6 +688,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. } => *amount_unlocked, } } @@ -659,6 +705,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. } => { *amount_unlocked = amount_unlocked.checked_sub(delta).expect("STX underflow"); } @@ -675,6 +724,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. } => { if let Some(new_amount) = amount_unlocked.checked_add(delta) { *amount_unlocked = new_amount; @@ -686,28 +738,6 @@ impl STXBalance { } } - fn set_locked(&mut self) { - match self { - STXBalance::Unlocked { .. } => {} - STXBalance::LockedPoxOne { - unlock_height, - amount_locked, - .. - } => { - *unlock_height = 0; - *amount_locked = 0; - } - STXBalance::LockedPoxTwo { - unlock_height, - amount_locked, - .. 
- } => { - *unlock_height = 0; - *amount_locked = 0; - } - } - } - /// Returns a canonicalized STXBalance at a given burn_block_height /// (i.e., if burn_block_height >= unlock_height, then return struct where /// amount_unlocked = 0, unlock_height = 0), and the amount of tokens which @@ -755,6 +785,9 @@ impl STXBalance { STXBalance::LockedPoxTwo { amount_unlocked, .. } => *amount_unlocked, + STXBalance::LockedPoxThree { + amount_unlocked, .. + } => *amount_unlocked, } } } @@ -784,6 +817,11 @@ impl STXBalance { unlock_height, .. } => (*amount_locked, *unlock_height), + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. + } => (*amount_locked, *unlock_height), } } } @@ -801,10 +839,23 @@ impl STXBalance { amount_locked, .. } => (*amount_unlocked, *amount_locked), + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + .. + } => (*amount_unlocked, *amount_locked), }; unlocked.checked_add(locked).expect("STX overflow") } + pub fn was_locked_by_v1(&self) -> bool { + if let STXBalance::LockedPoxOne { .. } = self { + true + } else { + false + } + } + pub fn was_locked_by_v2(&self) -> bool { if let STXBalance::LockedPoxTwo { .. } = self { true @@ -813,6 +864,14 @@ impl STXBalance { } } + pub fn was_locked_by_v3(&self) -> bool { + if let STXBalance::LockedPoxThree { .. } = self { + true + } else { + false + } + } + pub fn has_locked_tokens_at_burn_block( &self, burn_block_height: u64, @@ -856,6 +915,19 @@ impl STXBalance { } true } + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. + } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + true + } } } @@ -903,6 +975,20 @@ impl STXBalance { } false } + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. 
+ } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + false + } } } From 8214cf3367295ee35814748f96edda98f389cf7d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 14:54:03 -0500 Subject: [PATCH 094/158] address PR nits --- clarity/src/vm/analysis/arithmetic_checker/tests.rs | 4 ++++ clarity/src/vm/analysis/read_only_checker/tests.rs | 4 ++++ clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs | 4 ++++ clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs | 4 ++++ clarity/src/vm/tests/defines.rs | 4 ++++ clarity/src/vm/tests/sequences.rs | 4 ++++ clarity/src/vm/tests/simple_apply_eval.rs | 4 ++++ clarity/src/vm/types/serialization.rs | 4 ++++ src/clarity_vm/clarity.rs | 2 +- src/clarity_vm/tests/contracts.rs | 4 ++-- testnet/stacks-node/src/tests/epoch_23.rs | 4 ++-- testnet/stacks-node/src/tests/neon_integrations.rs | 4 ++-- 12 files changed, 39 insertions(+), 7 deletions(-) diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index d528c644e1..136226b380 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -38,6 +38,10 @@ use crate::vm::variables::NativeVariables; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_arith_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/read_only_checker/tests.rs b/clarity/src/vm/analysis/read_only_checker/tests.rs index 
bedeb4b6a3..b0644c7291 100644 --- a/clarity/src/vm/analysis/read_only_checker/tests.rs +++ b/clarity/src/vm/analysis/read_only_checker/tests.rs @@ -33,6 +33,10 @@ use stacks_common::types::StacksEpochId; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_read_only_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index 1a32d082f6..6d54c0ee6a 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -25,6 +25,10 @@ use rstest_reuse::{self, *}; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_assets(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} use crate::vm::analysis::errors::CheckErrors; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index ea8e8c2fc0..1f223de8f1 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -48,6 +48,10 @@ fn mem_type_check_v1(snippet: &str) -> 
CheckResult<(Option, Contr #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} #[template] diff --git a/clarity/src/vm/tests/defines.rs b/clarity/src/vm/tests/defines.rs index 964042ea2f..9438f1b168 100644 --- a/clarity/src/vm/tests/defines.rs +++ b/clarity/src/vm/tests/defines.rs @@ -24,6 +24,10 @@ use rstest_reuse::{self, *}; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} use crate::vm::ast::build_ast; diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index 68911f1eb2..36789e929b 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -37,6 +37,10 @@ use std::convert::{TryFrom, TryInto}; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, 
StacksEpochId::Epoch23)] fn test_clarity_versions_sequences(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} #[test] diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 15279243ce..28eb101ab5 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -54,6 +54,10 @@ use stacks_common::util::hash::{hex_bytes, to_hex}; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_simple_apply_eval( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 844b51f1b3..b73be379d8 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -962,6 +962,10 @@ mod tests { #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] fn test_clarity_versions_serialization( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index ac4f562f72..e0762e88d0 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1126,7 +1126,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) .unwrap(); - // require 2.2 rules 
henceforth in this connection as well + // require 2.3 rules henceforth in this connection as well tx_conn.epoch = StacksEpochId::Epoch23; }); diff --git a/src/clarity_vm/tests/contracts.rs b/src/clarity_vm/tests/contracts.rs index 1e366346ce..293ddb32d8 100644 --- a/src/clarity_vm/tests/contracts.rs +++ b/src/clarity_vm/tests/contracts.rs @@ -473,8 +473,8 @@ fn trait_invocation_cross_epoch() { &sender, None, &invoke_contract_id, - "invocation-2", - &[Value::Principal(impl_contract_id.clone().into())], + "invocation-1", + &[], |_, _| false, ) .unwrap_err(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 84072c9a0f..a04893f7d3 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -362,7 +362,7 @@ fn trait_invocation_behavior() { // epoch_2_2 - 1, so these are the last transactions processed pre-2.2. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let publish_invoke = make_contract_publish( + let publish_wrap = make_contract_publish( &spender_sk, spender_nonce, fee_amount, @@ -371,7 +371,7 @@ fn trait_invocation_behavior() { ); spender_nonce += 1; - submit_tx(&http_origin, &publish_invoke); + submit_tx(&http_origin, &publish_wrap); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 34f2da930d..5bb05bacca 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -7754,7 +7754,7 @@ fn atlas_stress_integration_test() { let total_time = ts_end.saturating_sub(ts_begin); eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); - // requests should take no more than 20ms + // requests should take no more than max_request_time_ms assert!( total_time < 
attempts * max_request_time_ms, "Atlas inventory request is too slow: {} >= {} * {}", @@ -7796,7 +7796,7 @@ fn atlas_stress_integration_test() { let total_time = ts_end.saturating_sub(ts_begin); eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); - // requests should take no more than 40ms + // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, "Atlas chunk request is too slow: {} >= {} * {}", From 8c2ec78a1e18520afd4a7277cca44a3b7873e6fa Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 28 Apr 2023 12:39:05 -0500 Subject: [PATCH 095/158] add comments to epoch_22 test --- testnet/stacks-node/src/tests/epoch_22.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 304825e2e0..8f4375cd3e 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -899,6 +899,10 @@ fn pox_2_unlock_all() { submit_tx(&http_origin, &tx); let nonce_of_2_1_unlock_ht_call = 3; + // this mines bitcoin block epoch_2_2 - 2, and causes + // the stacks-node to mine the stacks block which will be included + // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` + // will be included in that bitcoin block. // this will build the last block before 2.2 activates next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); @@ -915,13 +919,17 @@ fn pox_2_unlock_all() { submit_tx(&http_origin, &tx); let nonce_of_2_2_unlock_ht_call = 4; + // this mines bitcoin block epoch_2_2 - 1, and causes + // the stacks-node to mine the stacks block which will be included + // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` + // will be included in that bitcoin block. 
// this block activates 2.2 next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); // this *burn block* is when the unlock occurs next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); - // and this will wake up the node + // and this will mine the first block whose parent is the unlock block next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); @@ -958,7 +966,7 @@ fn pox_2_unlock_all() { "Spender 2 should have two accepted transactions" ); - // and this block is the first block whose parent has >= unlock burn block + // and this will mine the bitcoin block containing the first block whose parent has >= unlock burn block // (which is the criterion for the unlock) next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); From d0c3497b09f268edafd86d09b290944c6a17b3bf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 2 May 2023 15:32:16 -0500 Subject: [PATCH 096/158] introduce epoch 2.4 --- .../vm/analysis/arithmetic_checker/tests.rs | 2 + clarity/src/vm/analysis/mod.rs | 5 +- .../vm/analysis/read_only_checker/tests.rs | 2 + .../src/vm/analysis/trait_checker/tests.rs | 2 + clarity/src/vm/analysis/type_checker/mod.rs | 12 +- .../type_checker/v2_1/tests/assets.rs | 2 + .../type_checker/v2_1/tests/contracts.rs | 2 + .../analysis/type_checker/v2_1/tests/mod.rs | 2 + clarity/src/vm/costs/mod.rs | 7 +- clarity/src/vm/functions/mod.rs | 2 + clarity/src/vm/tests/defines.rs | 2 + clarity/src/vm/tests/sequences.rs | 2 + clarity/src/vm/tests/simple_apply_eval.rs | 2 + clarity/src/vm/types/serialization.rs | 2 + clarity/src/vm/types/signatures.rs | 18 +-- clarity/src/vm/version.rs | 1 + src/chainstate/burn/db/sortdb.rs | 52 ++++++++- .../burn/operations/leader_block_commit.rs | 7 +- src/chainstate/coordinator/mod.rs | 3 +- src/chainstate/stacks/db/blocks.rs | 50 +++++++- src/chainstate/stacks/db/mod.rs | 1 +
src/chainstate/stacks/db/transactions.rs | 1 + src/clarity_vm/clarity.rs | 27 +++++ src/clarity_vm/tests/forking.rs | 6 + src/core/mod.rs | 109 +++++++++++++++++- src/cost_estimates/pessimistic.rs | 2 + stacks-common/src/types/mod.rs | 5 +- testnet/stacks-node/src/neon_node.rs | 4 +- 28 files changed, 300 insertions(+), 32 deletions(-) diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 136226b380..680d66cadf 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -42,6 +42,8 @@ use crate::vm::variables::NativeVariables; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_arith_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index d94e2fd1b3..640807d42c 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -137,7 +137,10 @@ pub fn run_analysis( StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) } StacksEpochId::Epoch10 => unreachable!("Epoch 1.0 is not a valid epoch for analysis"), diff --git a/clarity/src/vm/analysis/read_only_checker/tests.rs b/clarity/src/vm/analysis/read_only_checker/tests.rs index b0644c7291..188a58db89 100644 --- a/clarity/src/vm/analysis/read_only_checker/tests.rs +++ 
b/clarity/src/vm/analysis/read_only_checker/tests.rs @@ -37,6 +37,8 @@ use stacks_common::types::StacksEpochId; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_read_only_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index 42f42777c0..7bca96fb9b 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -38,6 +38,8 @@ use stacks_common::types::StacksEpochId; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_trait_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index bbcd9270cb..70ed9e1759 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -50,9 +50,10 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_2_05(accounting, args) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { - self.check_args_2_1(accounting, args, clarity_version) - } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), } } @@ -68,7 +69,10 @@ impl 
FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_by_allowing_trait_cast_2_05(db, func_args) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index 6d54c0ee6a..0ee33344ea 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -29,6 +29,8 @@ use rstest_reuse::{self, *}; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_assets(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} use crate::vm::analysis::errors::CheckErrors; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 1f223de8f1..d3771c72ec 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -52,6 +52,8 @@ fn mem_type_check_v1(snippet: &str) -> CheckResult<(Option, Contr #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_contracts(#[case] version: ClarityVersion, 
#[case] epoch: StacksEpochId) {} #[template] diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index cdbdf8d85f..d867528912 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -62,6 +62,8 @@ pub mod contracts; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_type_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index cc5296909a..7b5b6467de 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -701,9 +701,10 @@ impl LimitedCostTracker { } StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { - COSTS_3_NAME.to_string() - } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => COSTS_3_NAME.to_string(), } } } diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index e2d0f5c6e4..991e074ffe 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -60,6 +60,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch22 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.3. StacksEpochId::Epoch23 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.4. 
+ StacksEpochId::Epoch24 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/tests/defines.rs b/clarity/src/vm/tests/defines.rs index 9438f1b168..6a73e375be 100644 --- a/clarity/src/vm/tests/defines.rs +++ b/clarity/src/vm/tests/defines.rs @@ -28,6 +28,8 @@ use rstest_reuse::{self, *}; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} use crate::vm::ast::build_ast; diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index 36789e929b..3b9e9b6c3c 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -41,6 +41,8 @@ use std::convert::{TryFrom, TryInto}; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_sequences(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} #[test] diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 28eb101ab5..dd5fee4fcb 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -58,6 +58,8 @@ use stacks_common::util::hash::{hex_bytes, to_hex}; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn 
test_clarity_versions_simple_apply_eval( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index b73be379d8..6e86b3f944 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -966,6 +966,8 @@ mod tests { #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_serialization( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 48b13a320f..79b56223a0 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -529,9 +529,10 @@ impl TypeSignature { pub fn admits_type(&self, epoch: &StacksEpochId, other: &TypeSignature) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => self.admits_type_v2_0(&other), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { - self.admits_type_v2_1(other) - } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } } @@ -730,7 +731,7 @@ impl TypeSignature { // Epoch-2.2 had a regression in canonicalization, so it must be preserved here. 
| StacksEpochId::Epoch22 => self.clone(), // Note for future epochs: Epochs >= 2.3 should use the canonicalize_v2_1() routine - StacksEpochId::Epoch21 | StacksEpochId::Epoch23 => self.canonicalize_v2_1(), + StacksEpochId::Epoch21 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => self.canonicalize_v2_1(), } } @@ -1052,9 +1053,10 @@ impl TypeSignature { ) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => Self::least_supertype_v2_0(a, b), - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { - Self::least_supertype_v2_1(a, b) - } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } } @@ -1947,6 +1949,8 @@ mod test { #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] + #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] + #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_signatures( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 46ad1500aa..62b625e0a6 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -33,6 +33,7 @@ impl ClarityVersion { StacksEpochId::Epoch21 => ClarityVersion::Clarity2, StacksEpochId::Epoch22 => ClarityVersion::Clarity2, StacksEpochId::Epoch23 => ClarityVersion::Clarity2, + StacksEpochId::Epoch24 => ClarityVersion::Clarity2, } } } diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 3355191901..d00414c7e6 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -501,7 +501,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "6"; +pub const SORTITION_DB_VERSION: 
&'static str = "7"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -710,6 +710,9 @@ const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" DELETE FROM epochs;"#]; +const SORTITION_DB_SCHEMA_7: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + // update this to add new indexes const LAST_SORTITION_DB_INDEX: &'static str = "index_delegate_stx_burn_header_hash"; @@ -2667,6 +2670,7 @@ impl SortitionDB { SortitionDB::apply_schema_4(&db_tx)?; SortitionDB::apply_schema_5(&db_tx, epochs_ref)?; SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; db_tx.instantiate_index()?; @@ -2861,6 +2865,7 @@ impl SortitionDB { || version == "4" || version == "5" || version == "6" + || version == "7" } StacksEpochId::Epoch2_05 => { version == "2" @@ -2868,15 +2873,35 @@ impl SortitionDB { || version == "4" || version == "5" || version == "6" + || version == "7" } StacksEpochId::Epoch21 => { - version == "3" || version == "4" || version == "5" || version == "6" + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" } StacksEpochId::Epoch22 => { - version == "3" || version == "4" || version == "5" || version == "6" + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" } StacksEpochId::Epoch23 => { - version == "3" || version == "4" || version == "5" || version == "6" + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + } + StacksEpochId::Epoch24 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" } } } @@ -2979,6 +3004,21 @@ impl SortitionDB { Ok(()) } + fn apply_schema_7(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_7 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_insert_epochs(&tx, epochs)?; 
+ + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["7"], + )?; + + Ok(()) + } + fn check_schema_version_or_error(&mut self) -> Result<(), db_error> { match SortitionDB::get_schema_version(self.conn()) { Ok(Some(version)) => { @@ -3025,6 +3065,10 @@ impl SortitionDB { let tx = self.tx_begin()?; SortitionDB::apply_schema_6(&tx.deref(), epochs)?; tx.commit()?; + } else if version == "6" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_7(&tx.deref(), epochs)?; + tx.commit()?; } else if version == expected_version { return Ok(()); } else { diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 6f00e186b3..83dfc14e49 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -40,6 +40,7 @@ use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::codec::{write_next, Error as codec_error, StacksMessageCodec}; use crate::core::STACKS_EPOCH_2_2_MARKER; use crate::core::STACKS_EPOCH_2_3_MARKER; +use crate::core::STACKS_EPOCH_2_4_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER}; use crate::net::Error as net_error; @@ -757,6 +758,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch21 => self.check_epoch_commit_marker(STACKS_EPOCH_2_1_MARKER), StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), StacksEpochId::Epoch23 => self.check_epoch_commit_marker(STACKS_EPOCH_2_3_MARKER), + StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), } } @@ -771,7 +773,10 @@ impl LeaderBlockCommitOp { ) -> Result { let tx_tip = tx.context.chain_tip.clone(); let intended_sortition = match epoch_id { - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | 
StacksEpochId::Epoch24 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 16f8f30c1b..1cb36cffa5 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -2993,7 +2993,8 @@ impl< } StacksEpochId::Epoch21 | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 => { + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { // 2.1 and onward behavior: the anchor block must also be the // heaviest-confirmed anchor block by BTC weight, and the highest // such anchor block if there are multiple contenders. diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 440029166f..40da3decc4 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -4899,6 +4899,14 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); applied = true; } + StacksEpochId::Epoch24 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } @@ -4919,6 +4927,13 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); applied = true; } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } _ => { 
panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } @@ -4933,20 +4948,40 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); applied = true; } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } + _ => { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch22 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } }, - StacksEpochId::Epoch22 => { + StacksEpochId::Epoch23 => { assert_eq!( sortition_epoch.epoch_id, - StacksEpochId::Epoch23, - "Should only transition from Epoch22 to Epoch23" + StacksEpochId::Epoch24, + "Should only transition from Epoch23 to Epoch24" ); - receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); applied = true; } - StacksEpochId::Epoch23 => { + StacksEpochId::Epoch24 => { panic!("No defined transition from Epoch23 forward") } } @@ -5534,7 +5569,10 @@ impl StacksChainState { // The DelegateStx bitcoin wire format does not exist before Epoch 2.1. 
Ok((stack_ops, transfer_ops, vec![])) } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index faec4e7da2..f6beeac6ad 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -225,6 +225,7 @@ impl DBConfig { StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", } } } diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index 596ff4f035..ff5a6a4649 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -8366,6 +8366,7 @@ pub mod test { StacksEpochId::Epoch21 => self.get_stacks_epoch(2), StacksEpochId::Epoch22 => self.get_stacks_epoch(3), StacksEpochId::Epoch23 => self.get_stacks_epoch(4), + StacksEpochId::Epoch24 => self.get_stacks_epoch(5), } } fn get_pox_payout_addrs( diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index e0762e88d0..40081b3994 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1136,6 +1136,33 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } + pub fn initialize_epoch_2_4(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. 
+ self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch24; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch24); + Ok(()) + }) + .unwrap(); + + // require 2.4 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch24; + }); + + debug!("Epoch 2.4 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; diff --git a/src/clarity_vm/tests/forking.rs b/src/clarity_vm/tests/forking.rs index c2d20a3f34..029e5654f6 100644 --- a/src/clarity_vm/tests/forking.rs +++ b/src/clarity_vm/tests/forking.rs @@ -42,6 +42,12 @@ const p1_str: &str = "'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_clarity_versions_type_checker( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/src/core/mod.rs b/src/core/mod.rs index 5e738ed9c1..a51d86524e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -59,6 +59,7 @@ pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; +pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; // network identifiers pub const NETWORK_ID_MAINNET: u32 = 
0x17000000; @@ -235,7 +236,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 6] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -274,10 +275,17 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch23, start_height: BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_3 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } @@ -377,6 +385,10 @@ pub static STACKS_EPOCH_2_2_MARKER: u8 = 0x07; /// *or greater*. pub static STACKS_EPOCH_2_3_MARKER: u8 = 0x08; +/// Stacks 2.4 epoch marker. All block-commits in 2.4 must have a memo bitfield with this value +/// *or greater*. 
+pub static STACKS_EPOCH_2_4_MARKER: u8 = 0x09; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); @@ -455,6 +467,8 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -758,6 +772,96 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_2_4(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_4 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length:
210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -819,6 +923,7 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1(first_burnchain_height), StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), StacksEpochId::Epoch23 => StacksEpoch::unit_test_2_3(first_burnchain_height), + StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), } } diff --git a/src/cost_estimates/pessimistic.rs b/src/cost_estimates/pessimistic.rs index 4fdf109792..aec4b6f399 100644 --- a/src/cost_estimates/pessimistic.rs +++ b/src/cost_estimates/pessimistic.rs @@ -234,6 +234,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch22 => ":2.1", // reuse cost estimates in Epoch23 StacksEpochId::Epoch23 => ":2.1", + // reuse cost estimates in Epoch24 + StacksEpochId::Epoch24 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index f71cd7a475..448fdc3937 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -74,11 +74,12 @@ pub enum StacksEpochId { Epoch21 = 0x0200a, Epoch22 = 0x0200f, Epoch23 = 0x02014, + Epoch24 = 0x02019, } impl 
StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch23 + StacksEpochId::Epoch24 } } @@ -91,6 +92,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch21 => write!(f, "2.1"), StacksEpochId::Epoch22 => write!(f, "2.2"), StacksEpochId::Epoch23 => write!(f, "2.3"), + StacksEpochId::Epoch24 => write!(f, "2.4"), } } } @@ -106,6 +108,7 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch21 as u32 => Ok(StacksEpochId::Epoch21), x if x == StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), x if x == StacksEpochId::Epoch23 as u32 => Ok(StacksEpochId::Epoch23), + x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), _ => Err("Invalid epoch"), } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 5ad0ecc586..17eebf2c97 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -177,7 +177,7 @@ use stacks::chainstate::stacks::{ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::STACKS_EPOCH_2_3_MARKER; +use stacks::core::STACKS_EPOCH_2_4_MARKER; use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -1326,7 +1326,7 @@ impl BlockMinerThread { apparent_sender: sender, key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_3_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, From c473be72c40c9ca82f352f7537ce4f6333f5a9b4 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 20:18:11 -0500 Subject: [PATCH 097/158] expanded epoch validation checks --- src/core/mod.rs | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index a51d86524e..91a42accd7 100644 
--- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -48,10 +48,8 @@ pub use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH // first byte == major network protocol version (currently 0x18) // second and third bytes are unused // fourth byte == highest epoch supported by this node -// - 0x05 for 2.05 -// - 0x06 for 2.1 -pub const PEER_VERSION_MAINNET: u32 = 0x18000008; -pub const PEER_VERSION_TESTNET: u32 = 0xfacade08; +pub const PEER_VERSION_MAINNET_MAJOR: u32 = 0x18000000; +pub const PEER_VERSION_TESTNET_MAJOR: u32 = 0xfacade00; pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; @@ -61,6 +59,14 @@ pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; +// this should be updated to the latest network epoch version supported by +// this node. this will be checked by the `validate_epochs()` method. +pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_4 as u32; + +// set the fourth byte of the peer version +pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; +pub const PEER_VERSION_TESTNET: u32 = PEER_VERSION_TESTNET_MAJOR | PEER_NETWORK_EPOCH; + // network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; pub const NETWORK_ID_TESTNET: u32 = 0xff000000; @@ -974,6 +980,20 @@ impl StacksEpochExtension for StacksEpoch { let mut seen_epochs = HashSet::new(); epochs.sort(); + let max_epoch = epochs_ref + .iter() + .max() + .expect("FATAL: expect at least one epoch"); + assert!( + max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH, + "stacks-blockchain static network epoch should be greater than or equal to the max epoch's" + ); + + assert!( + StacksEpochId::latest() >= max_epoch.epoch_id, + "StacksEpochId::latest() should be greater than or equal to any epoch defined in the node" + ); + let mut epoch_end_height = 0; for epoch in epochs.iter() { assert!( From 
960cbc00b3df08d98c31b717c22f1affa0cad326 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Fri, 28 Apr 2023 12:03:50 -0400 Subject: [PATCH 098/158] wip - add pox 3 contract --- .../burn/operations/leader_block_commit.rs | 3 + src/chainstate/coordinator/tests.rs | 228 ++- src/chainstate/stacks/boot/mod.rs | 8 +- src/chainstate/stacks/boot/pox-3.clar | 1290 +++++++++++++++++ src/clarity_vm/clarity.rs | 119 +- src/core/mod.rs | 20 +- 6 files changed, 1651 insertions(+), 17 deletions(-) create mode 100644 src/chainstate/stacks/boot/pox-3.clar diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 83dfc14e49..9330e2d707 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -734,6 +734,9 @@ impl LeaderBlockCommitOp { /// Check the epoch marker in the block commit, given the epoch we're in fn check_epoch_commit(&self, epoch_id: StacksEpochId) -> Result<(), op_error> { + info!("CHECKING EPOCH COMMIT: curr epoch: {:?}, marker: {:?}, expected for 2.2:{:?}", + epoch_id, self.memo[0], STACKS_EPOCH_2_2_MARKER + ); match epoch_id { StacksEpochId::Epoch10 => { panic!("FATAL: processed block-commit pre-Stacks 2.0"); diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index e53a4ffc08..b02cb09b0c 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -39,7 +39,7 @@ use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::coordinator::{Error as CoordError, *}; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::PoxStartCycleInfo; +use crate::chainstate::stacks::boot::{POX_3_NAME, PoxStartCycleInfo}; use crate::chainstate::stacks::boot::POX_1_NAME; use crate::chainstate::stacks::boot::POX_2_NAME; use crate::chainstate::stacks::db::{ @@ -4092,7 +4092,7 @@ fn 
test_epoch_switch_cost_contract_instantiation() { // and that the epoch transition is only applied once. If it were to be applied more than once, // the test would panic when trying to re-create the pox-2 contract. #[test] -fn test_epoch_switch_pox_contract_instantiation() { +fn test_epoch_switch_pox_2_contract_instantiation() { let path = "/tmp/stacks-blockchain-epoch-switch-pox-contract-instantiation"; let _r = std::fs::remove_dir_all(path); @@ -4307,6 +4307,230 @@ fn test_epoch_switch_pox_contract_instantiation() { } } +// This test ensures the epoch transition from 2.05 to 2.1 is applied at the proper block boundaries, +// and that the epoch transition is only applied once. If it were to be applied more than once, +// the test would panic when trying to re-create the pox-2 contract. +#[test] +fn test_epoch_switch_pox_3_contract_instantiation() { + let path = "/tmp/stacks-blockchain-epoch-switch-pox-3-contract-instantiation"; + let _r = std::fs::remove_dir_all(path); + + let sunset_ht = 8000; + let pox_consts = Some(PoxConstants::new( + 6, + 3, + 3, + 25, + 5, + 10, + sunset_ht, + 10, + 14, + )); + let burnchain_conf = get_burnchain(path, pox_consts.clone()); + + let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); + let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); + + let stacker = p2pkh_from(&StacksPrivateKey::new()); + let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); + let initial_balances = vec![(stacker.clone().into(), balance)]; + + setup_states( + &[path], + &vrf_keys, + &committers, + pox_consts.clone(), + Some(initial_balances), + StacksEpochId::Epoch23, + ); + + let mut coord = make_coordinator(path, Some(burnchain_conf)); + + coord.handle_new_burnchain_block().unwrap(); + + let sort_db = get_sortition_db(path, pox_consts.clone()); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); 
+ assert_eq!(tip.block_height, 1); + assert_eq!(tip.sortition, false); + let (_, ops) = sort_db + .get_sortition_result(&tip.sortition_id) + .unwrap() + .unwrap(); + + // we should have all the VRF registrations accepted + assert_eq!(ops.accepted_ops.len(), vrf_keys.len()); + assert_eq!(ops.consumed_leader_keys.len(), 0); + + // process sequential blocks, and their sortitions... + let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; + + for ix in 0..18 { + let vrf_key = &vrf_keys[ix]; + let miner = &committers[ix]; + + let mut burnchain = get_burnchain_db(path, pox_consts.clone()); + let mut chainstate = get_chainstate(path); + + // Want to ensure that the pox-3 contract DNE for all blocks after the epoch transition height, + // and does exist for blocks after the boundary. + // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition + // ^ ^ ^ + //.. B1 -> B2 -> B3 -> B4 -> B5 -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 + // S0 -> S1 -> S2 -> S3 -> S4 -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 + // \ + // \ + // _ _ _ S15 -> S16 -> .. 
+ let parent = if ix == 0 { + BlockHeaderHash([0; 32]) + } else if ix == 15 { + stacks_blocks[ix - 2].1.header.block_hash() + } else { + stacks_blocks[ix - 1].1.header.block_hash() + }; + + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let b = get_burnchain(path, pox_consts.clone()); + + let next_mock_header = BurnchainBlockHeader { + block_height: burnchain_tip.block_height + 1, + block_hash: BurnchainHeaderHash([0; 32]), + parent_block_hash: burnchain_tip.block_hash, + num_txs: 0, + timestamp: 1, + }; + + let reward_cycle_info = coord.get_reward_cycle_info(&next_mock_header).unwrap(); + + let (good_op, block) = if ix == 0 { + make_genesis_block_with_recipients( + &sort_db, + &mut chainstate, + &parent, + miner, + 10000, + vrf_key, + ix as u32, + None, + ) + } else { + make_stacks_block_with_recipients( + &sort_db, + &mut chainstate, + &b, + &parent, + burnchain_tip.block_height, + miner, + 1000, + vrf_key, + ix as u32, + None, + ) + }; + + let expected_winner = good_op.txid(); + let ops = vec![good_op]; + + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + produce_burn_block( + &b, + &mut burnchain, + &burnchain_tip.block_hash, + ops, + vec![].iter_mut(), + ); + // handle the sortition + coord.handle_new_burnchain_block().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + assert_eq!(&tip.winning_block_txid, &expected_winner); + + // load the block into staging + let block_hash = block.header.block_hash(); + + assert_eq!(&tip.winning_stacks_block_hash, &block_hash); + stacks_blocks.push((tip.sortition_id.clone(), block.clone())); + + preprocess_block(&mut chainstate, &sort_db, &tip, block); + + // handle the stacks block + coord.handle_new_stacks_block().unwrap(); + + let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); + let burn_block_height = tip.block_height; + + // check that the expected stacks epoch ID is equal to the actual stacks epoch 
ID + let expected_epoch = match burn_block_height { + x if x < 4 => StacksEpochId::Epoch20, + x if x >= 4 && x < 8 => StacksEpochId::Epoch2_05, + x if x >= 8 && x < 12 => StacksEpochId::Epoch21, + x if x >= 12 && x < 16 => StacksEpochId::Epoch22, + _ => StacksEpochId::Epoch23, + }; + assert_eq!( + chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| conn.with_clarity_db_readonly(|db| db + .get_stacks_epoch(burn_block_height as u32) + .unwrap()) + ) + .unwrap() + .epoch_id, + expected_epoch + ); + + // These expectations are according to according to hard-coded values in + // `StacksEpoch::unit_test_2_3`. + let expected_runtime = match burn_block_height { + x if x < 4 => u64::MAX, + x if x >= 4 && x < 8 => 205205, + x if x >= 8 && x < 12 => 210210, + x if x >= 12 && x < 16 => 220220, + x => 230230, + }; + assert_eq!( + chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| { + conn.with_clarity_db_readonly(|db| { + db.get_stacks_epoch(burn_block_height as u32).unwrap() + }) + }, + ) + .unwrap() + .block_limit + .runtime, + expected_runtime + ); + + // check that pox-3 contract DNE before epoch 2.3, and that it does exist after + let does_pox_3_contract_exist = chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| { + conn.with_clarity_db_readonly(|db| { + db.get_contract(&boot_code_id(POX_3_NAME, false)) + }) + }, + ) + .unwrap(); + + if burn_block_height < 16 { + assert!(does_pox_3_contract_exist.is_err()) + } else { + assert!(does_pox_3_contract_exist.is_ok()) + } + } +} + +#[cfg(test)] fn get_total_stacked_info( chainstate: &mut StacksChainState, burn_dbconn: &dyn BurnStateDB, diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index fcd4d5497e..e2d2e48e92 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ 
b/src/chainstate/stacks/boot/mod.rs @@ -81,10 +81,10 @@ pub const BOOT_CODE_BNS: &'static str = std::include_str!("bns.clar"); pub const BOOT_CODE_GENESIS: &'static str = std::include_str!("genesis.clar"); pub const POX_1_NAME: &'static str = "pox"; pub const POX_2_NAME: &'static str = "pox-2"; +pub const POX_3_NAME: &'static str = "pox-3"; -const POX_2_TESTNET_CONSTS: &'static str = std::include_str!("pox-testnet.clar"); -const POX_2_MAINNET_CONSTS: &'static str = std::include_str!("pox-mainnet.clar"); const POX_2_BODY: &'static str = std::include_str!("pox-2.clar"); +const POX_3_BODY: &'static str = std::include_str!("pox-3.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; @@ -101,6 +101,10 @@ lazy_static! { format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_2_BODY); pub static ref POX_2_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_2_BODY); + pub static ref POX_3_MAINNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); + pub static ref POX_3_TESTNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar new file mode 100644 index 0000000000..4a4bc1b482 --- /dev/null +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -0,0 +1,1290 @@ +;; The .pox-3 contract +;; Error codes +(define-constant ERR_STACKING_UNREACHABLE 255) +(define-constant ERR_STACKING_CORRUPTED_STATE 254) +(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1) +(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2) +(define-constant ERR_STACKING_ALREADY_STACKED 3) +(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4) +(define-constant ERR_STACKING_EXPIRED 5) +(define-constant 
ERR_STACKING_STX_LOCKED 6) +(define-constant ERR_STACKING_PERMISSION_DENIED 9) +(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11) +(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12) +(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13) +(define-constant ERR_STACKING_ALREADY_REJECTED 17) +(define-constant ERR_STACKING_INVALID_AMOUNT 18) +(define-constant ERR_NOT_ALLOWED 19) +(define-constant ERR_STACKING_ALREADY_DELEGATED 20) +(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) +(define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) +(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) +(define-constant ERR_INVALID_START_BURN_HEIGHT 24) +(define-constant ERR_NOT_CURRENT_STACKER 25) +(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) +(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) +(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) +(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) + +;; PoX disabling threshold (a percent) +(define-constant POX_REJECTION_FRACTION u25) + +;; Valid values for burnchain address versions. +;; These first four correspond to address hash modes in Stacks 2.1, +;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they +;; cannot be defined here again). 
+;; (define-constant ADDRESS_VERSION_P2PKH 0x00) +;; (define-constant ADDRESS_VERSION_P2SH 0x01) +;; (define-constant ADDRESS_VERSION_P2WPKH 0x02) +;; (define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. +(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var pox-rejection-fraction uint POX_REJECTION_FRACTION) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-2-1-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (rejection-fraction uint) + (begin-2-1-reward-cycle uint)) + (begin + (asserts! 
(not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set pox-rejection-fraction rejection-fraction) + (var-set first-2-1-reward-cycle begin-2-1-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-2` contract tries to keep this state in sync with the reward-cycle +;; state. The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. 
The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint) + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? + delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. +(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? 
+;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. +;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). +(define-map logged-partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; Amount of uSTX that reject PoX, by reward cycle +(define-map stacking-rejection + { reward-cycle: uint } + { amount: uint } +) + +;; Who rejected in which reward cycle +(define-map stacking-rejectors + { stacker: principal, reward-cycle: uint } + { amount: uint } +) + +;; Getter for stacking-rejectors +(define-read-only (get-pox-rejection (stacker principal) (reward-cycle uint)) + (map-get? stacking-rejectors { stacker: stacker, reward-cycle: reward-cycle })) + +;; Has PoX been rejected in the given reward cycle? +(define-read-only (is-pox-active (reward-cycle uint)) + (let ( + (reject-votes + (default-to + u0 + (get amount (map-get? stacking-rejection { reward-cycle: reward-cycle })))) + ) + ;; (100 * reject-votes) / stx-liquid-supply < pox-rejection-fraction + (< (* u100 reject-votes) + (* (var-get pox-rejection-fraction) stx-liquid-supply))) +) + +;; What's the reward cycle number of the burnchain block height? 
+;; Will runtime-abort if height is less than the first burnchain block (this is intentional) +(define-read-only (burn-height-to-reward-cycle (height uint)) + (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length))) + +;; What's the block height at the start of a given reward cycle? +(define-read-only (reward-cycle-to-burn-height (cycle uint)) + (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length)))) + +;; What's the current PoX reward cycle? +(define-read-only (current-pox-reward-cycle) + (burn-height-to-reward-cycle burn-block-height)) + +;; Get the _current_ PoX stacking principal information. If the information +;; is expired, or if there's never been such a stacker, then returns none. +(define-read-only (get-stacker-info (stacker principal)) + (match (map-get? stacking-state { stacker: stacker }) + stacking-info + (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle)) + ;; present, but lock has expired + none + ;; present, and lock has not expired + (some stacking-info) + ) + ;; no state at all + none + )) + +(define-read-only (check-caller-allowed) + (or (is-eq tx-sender contract-caller) + (let ((caller-allowed + ;; if not in the caller map, return false + (unwrap! (map-get? allowance-contract-callers + { sender: tx-sender, contract-caller: contract-caller }) + false)) + (expires-at + ;; if until-burn-ht not set, then return true (because no expiry) + (unwrap! (get until-burn-ht caller-allowed) true))) + ;; is the caller allowance expired? + (if (>= burn-block-height expires-at) + false + true)))) + +(define-read-only (get-check-delegation (stacker principal)) + (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker })))) + ;; did the existing delegation expire? 
+ (if (match (get until-burn-ht delegation-info) + until-burn-ht (> burn-block-height until-burn-ht) + false) + ;; it expired, return none + none + ;; delegation is active + (some delegation-info)))) + +;; Get the size of the reward set for a reward cycle. +;; Note that this does _not_ return duplicate PoX addresses. +;; Note that this also _will_ return PoX addresses that are beneath +;; the minimum threshold -- i.e. the threshold can increase after insertion. +;; Used internally by the Stacks node, which filters out the entries +;; in this map to select PoX addresses with enough STX. +(define-read-only (get-reward-set-size (reward-cycle uint)) + (default-to + u0 + (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle })))) + +;; How many rejection votes have we been accumulating for the next block +(define-read-only (next-cycle-rejection-votes) + (default-to + u0 + (get amount (map-get? stacking-rejection { reward-cycle: (+ u1 (current-pox-reward-cycle)) })))) + +;; Add a single PoX address to a single reward cycle. +;; Used to build up a set of per-reward-cycle PoX addresses. +;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle! +;; Returns the index into the reward cycle that the PoX address is stored to +(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-cycle uint) + (amount-ustx uint) + (stacker (optional principal))) + (let ((sz (get-reward-set-size reward-cycle))) + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: sz } + { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker }) + (map-set reward-cycle-pox-address-list-len + { reward-cycle: reward-cycle } + { len: (+ u1 sz) }) + sz)) + +;; How many uSTX are stacked? +(define-read-only (get-total-ustx-stacked (reward-cycle uint)) + (default-to + u0 + (get total-ustx (map-get? 
reward-cycle-total-stacked { reward-cycle: reward-cycle }))) +) + +;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle. +;; Returns (optional (tuple (pox-addr ) (total-ustx ))) +(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) + (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) + +(define-private (fold-unlock-reward-cycle (set-index uint) + (data-res (response { cycle: uint, + first-unlocked-cycle: uint, + stacker: principal + } int))) + (let ((data (try! data-res)) + (cycle (get cycle data)) + (first-unlocked-cycle (get first-unlocked-cycle data))) + ;; if current-cycle hasn't reached first-unlocked-cycle, just continue to next iter + (asserts! (>= cycle first-unlocked-cycle) (ok (merge data { cycle: (+ u1 cycle) }))) + (let ((cycle-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: set-index }))) + (cycle-entry-u (get stacker cycle-entry)) + (cycle-entry-total-ustx (get total-ustx cycle-entry)) + (cycle-last-entry-ix (- (get len (unwrap-panic (map-get? reward-cycle-pox-address-list-len { reward-cycle: cycle }))) u1))) + (asserts! (is-eq cycle-entry-u (some (get stacker data))) (err ERR_STACKING_CORRUPTED_STATE)) + (if (not (is-eq cycle-last-entry-ix set-index)) + ;; do a "move" if the entry to remove isn't last + (let ((move-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix })))) + (map-set reward-cycle-pox-address-list + { reward-cycle: cycle, index: set-index } + move-entry) + (match (get stacker move-entry) moved-stacker + ;; if the moved entry had an associated stacker, update its state + (let ((moved-state (unwrap-panic (map-get? 
stacking-state { stacker: moved-stacker }))) + ;; calculate the index into the reward-set-indexes that `cycle` is at + (moved-cycle-index (- cycle (get first-reward-cycle moved-state))) + (moved-reward-list (get reward-set-indexes moved-state)) + ;; reward-set-indexes[moved-cycle-index] = set-index via slice?, append, concat. + (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) + (map-set stacking-state { stacker: moved-stacker } + (merge moved-state { reward-set-indexes: update-list }))) + ;; otherwise, we don't need to update stacking-state after move + true)) + ;; if not moving, just noop + true) + ;; in all cases, we now need to delete the last list entry + (map-delete reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix }) + (map-set reward-cycle-pox-address-list-len { reward-cycle: cycle } { len: cycle-last-entry-ix }) + ;; finally, update `reward-cycle-total-stacked` + (map-set reward-cycle-total-stacked { reward-cycle: cycle } + { total-ustx: (- (get total-ustx (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: cycle }))) + cycle-entry-total-ustx) }) + (ok (merge data { cycle: (+ u1 cycle)} ))))) + +;; This method is called by the Stacks block processor directly in order to handle the contract state mutations +;; associated with an early unlock. This can only be invoked by the block processor: it is private, and no methods +;; from this contract invoke it. +(define-private (handle-unlock (user principal) (amount-locked uint) (cycle-to-unlock uint)) + (let ((user-stacking-state (unwrap-panic (map-get? stacking-state { stacker: user }))) + (first-cycle-locked (get first-reward-cycle user-stacking-state)) + (reward-set-indexes (get reward-set-indexes user-stacking-state))) + ;; iterate over each reward set the user is a member of, and remove them from the sets. only apply to reward sets after cycle-to-unlock. + (try! 
(fold fold-unlock-reward-cycle reward-set-indexes (ok { cycle: first-cycle-locked, first-unlocked-cycle: cycle-to-unlock, stacker: user }))) + ;; Now that we've cleaned up all the reward set entries for the user, delete the user's stacking-state + (map-delete stacking-state { stacker: user }) + (ok true))) + +;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). +;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. +;; Used by add-pox-addr-to-reward-cycles. +;; No checking is done. +;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if +;; the pox-addr was added to the given cycle. Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. 
+(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-set-indexes (list 12 uint)) + (first-reward-cycle uint) + (num-cycles uint) + (stacker (optional principal)) + (amount-ustx uint) + (i uint)))) + (let ((reward-cycle (+ (get first-reward-cycle params) (get i params))) + (num-cycles (get num-cycles params)) + (i (get i params)) + (reward-set-index (if (< i num-cycles) + (let ((total-ustx (get-total-ustx-stacked reward-cycle)) + (reward-index + ;; record how many uSTX this pox-addr will stack for in the given reward cycle + (append-reward-cycle-pox-addr + (get pox-addr params) + reward-cycle + (get amount-ustx params) + (get stacker params) + ))) + ;; update running total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: (+ (get amount-ustx params) total-ustx) }) + (some reward-index)) + none)) + (next-i (if (< i num-cycles) (+ i u1) i))) + { + pox-addr: (get pox-addr params), + first-reward-cycle: (get first-reward-cycle params), + num-cycles: num-cycles, + amount-ustx: (get amount-ustx params), + stacker: (get stacker params), + reward-set-indexes: (match + reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12)) + (get reward-set-indexes params)), + i: next-i + })) + +;; Add a PoX address to a given sequence of reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. 
+(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint) + (stacker principal)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)) + (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes + { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles, + reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker) })) + (reward-set-indexes (get reward-set-indexes results))) + ;; For safety, add up the number of times (add-pox-addr-to-ith-reward-cycle) returns 1. + ;; It _should_ be equal to num-cycles. + (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE)) + (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE)) + (ok reward-set-indexes))) + +(define-private (add-pox-partial-stacked-to-ith-cycle + (cycle-index uint) + (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + num-cycles: uint, + amount-ustx: uint })) + (let ((pox-addr (get pox-addr params)) + (num-cycles (get num-cycles params)) + (reward-cycle (get reward-cycle params)) + (amount-ustx (get amount-ustx params))) + (let ((current-amount + (default-to u0 + (get stacked-amount + (map-get? partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }))))) + (if (>= cycle-index num-cycles) + ;; do not add to cycles at index >= num-cycles + false + ;; otherwise, add to the partial-stacked-by-cycle + (map-set partial-stacked-by-cycle + { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle } + { stacked-amount: (+ amount-ustx current-amount) })) + ;; produce the next params tuple + { pox-addr: pox-addr, + reward-cycle: (+ u1 reward-cycle), + num-cycles: num-cycles, + amount-ustx: amount-ustx }))) + +;; Add a PoX address to a given sequence of partial reward cycle lists. 
+;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))) + (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes + { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx }) + true)) + +;; What is the minimum number of uSTX to be stacked in the given reward cycle? +;; Used internally by the Stacks node, and visible publicly. +(define-read-only (get-stacking-minimum) + (/ stx-liquid-supply STACKING_THRESHOLD_25)) + +;; Is the address mode valid for a PoX address? +(define-read-only (check-pox-addr-version (version (buff 1))) + (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION)) + +;; Is this buffer the right length for the given PoX address? +(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32))) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20) + (is-eq (len hashbytes) u20) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32) + (is-eq (len hashbytes) u32) + false))) + +;; Is the given lock period valid? +(define-read-only (check-pox-lock-period (lock-period uint)) + (and (>= lock-period MIN_POX_REWARD_CYCLES) + (<= lock-period MAX_POX_REWARD_CYCLES))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; minimum uSTX must be met + (asserts! 
(<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender first-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! (check-pox-addr-version (get version pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + ;; address hashbytes must be valid for the version + (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + (ok true))) + +;; Revoke contract-caller authorization to call stacking methods +(define-public (disallow-contract-caller (caller principal)) + (begin + (asserts! 
(is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller })))) + +;; Give a contract-caller authorization to call stacking methods +;; normally, stacking methods may only be invoked by _direct_ transactions +;; (i.e., the tx-sender issues a direct contract-call to the stacking methods) +;; by issuing an allowance, the tx-sender may call through the allowed contract +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (begin + (asserts! (is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-set allowance-contract-callers + { sender: tx-sender, contract-caller: caller } + { until-burn-ht: until-burn-ht })))) + +;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX). +;; The STX will be locked for the given number of reward cycles (lock-period). +;; This is the self-service interface. tx-sender will be the Stacker. +;; +;; * The given stacker cannot currently be stacking. +;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum) +;; at the time this method is called. +;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold +;; may increase between reward cycles. +;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`. +;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle, +;; and in most cases should be set to the current burn block height. +;; +;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically. 
+(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period }) + + ;; return the lock-up information, so the node can actually carry out the lock. 
+ (ok { stacker: tx-sender, lock-amount: amount-ustx, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +(define-public (revoke-delegate-stx) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete delegation-state { stacker: tx-sender })))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), + hashbytes: (buff 32) }))) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. 
+;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. 
+ (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + amount-ustx: amount-ustx, + i: u0 })) + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. +;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. 
+;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! (> reward-cycle (current-pox-reward-cycle)) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((amount-ustx (get stacked-amount partial-stacked)) + ;; reward-cycle must point to an existing record in reward-cycle-total-stacked + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed + (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list + (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) + (err ERR_DELEGATION_NO_REWARD_SLOT))) + (increased-ustx (+ (get total-ustx existing-entry) amount-ustx)) + (total-ustx (+ (get total-ustx existing-total) amount-ustx))) + + ;; must be stackable + (try! (minimal-can-stack-stx pox-addr total-ustx reward-cycle u1)) + + ;; new total must exceed the stacking minimum + (asserts! 
(<= (get-stacking-minimum) total-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + ;; there must *not* be a stacker entry (since this is a delegator) + (asserts! (is-none (get stacker existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; the given PoX address must match the one on record + (asserts! (is-eq pox-addr (get pox-addr existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; update the pox-address list -- bump the total-ustx + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: pox-addr, + total-ustx: increased-ustx, + stacker: none }) + + ;; update the total ustx in this cycle + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok true)))) + +;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle +;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))) + (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to 
"post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + unlock-burn-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; stacker principal must not be stacking + (asserts! (is-none (get-stacker-info stacker)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance stacker) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx) + + ;; add stacker record + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + first-reward-cycle: first-reward-cycle, + reward-set-indexes: (list), + lock-period: lock-period }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + lock-amount: amount-ustx, + unlock-burn-height: unlock-burn-height }))) + +;; Reject Stacking for this reward cycle. +;; tx-sender votes all its uSTX for rejection. +;; Note that unlike PoX, rejecting PoX does not lock the tx-sender's +;; tokens. PoX rejection acts like a coin vote. +(define-public (reject-pox) + (let ( + (balance (stx-get-balance tx-sender)) + (vote-reward-cycle (+ u1 (current-pox-reward-cycle))) + ) + + ;; tx-sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender vote-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; tx-sender can't be a stacker + (asserts! 
(is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; vote for rejection + (map-set stacking-rejection + { reward-cycle: vote-reward-cycle } + { amount: (+ (next-cycle-rejection-votes) balance) } + ) + + ;; mark voted + (map-set stacking-rejectors + { stacker: tx-sender, reward-cycle: vote-reward-cycle } + { amount: balance } + ) + + (ok true)) +) + +;; Used for PoX parameters discovery +(define-read-only (get-pox-info) + (ok { + min-amount-ustx: (get-stacking-minimum), + reward-cycle-id: (current-pox-reward-cycle), + prepare-cycle-length: (var-get pox-prepare-cycle-length), + first-burnchain-block-height: (var-get first-burnchain-block-height), + reward-cycle-length: (var-get pox-reward-cycle-length), + rejection-fraction: (var-get pox-rejection-fraction), + current-rejection-votes: (next-cycle-rejection-votes), + total-liquid-supply-ustx: stx-liquid-supply, + }) +) + +;; Update the number of stacked STX in a given reward cycle entry. +;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number. +;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have +;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle. +(define-private (increase-reward-cycle-entry + (reward-cycle-index uint) + (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint }))) + (let ((data (try! updates)) + (first-cycle (get first-cycle data)) + (reward-cycle (get reward-cycle data))) + (if (> first-cycle reward-cycle) + ;; not at first cycle to process yet + (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data) }) + (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) + (existing-total (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (total-ustx (+ (get total-ustx existing-total) (get add-amount data)))) + ;; stacker must match + (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none) + ;; update the pox-address list + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: (get pox-addr existing-entry), + total-ustx: total-ustx, + stacker: (some (get stacker data)) }) + ;; update the total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + (some { first-cycle: first-cycle, + reward-cycle: (+ u1 reward-cycle), + stacker: (get stacker data), + add-amount: (get add-amount data) }))))) + +;; Increase the number of STX locked. +;; *New in Stacks 2.1* +;; This method locks up an additional amount of STX from `tx-sender`'s, indicated +;; by `increase-by`. The `tx-sender` must already be Stacking. +(define-public (stack-increase (increase-by uint)) + (let ((stacker-info (stx-account tx-sender)) + (amount-stacked (get locked stacker-info)) + (amount-unlocked (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increased-cycle (+ cur-cycle u1)) + (stacker-state (unwrap! (map-get? stacking-state + { stacker: tx-sender }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + ;; tx-sender must be currently locked + (asserts! (> amount-stacked u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + ;; stacker must have enough stx to lock + (asserts! (>= amount-unlocked increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_ALREADY_DELEGATED)) + ;; update reward cycle amounts + (asserts! (is-some (fold increase-reward-cycle-entry + (get reward-set-indexes stacker-state) + (some { first-cycle: first-increased-cycle, + reward-cycle: (get first-reward-cycle stacker-state), + stacker: tx-sender, + add-amount: increase-by }))) + (err ERR_STACKING_UNREACHABLE)) + ;; NOTE: stacking-state map is unchanged: it no longer tracks amount-stacked in PoX-2 + (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)}))) + +;; Extend an active Stacking lock. +;; *New in Stacks 2.1* +;; This method extends the `tx-sender`'s current lockup for an additional `extend-count` +;; and associates `pox-addr` with the rewards +(define-public (stack-extend (extend-count uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) })) + (let ((stacker-info (stx-account tx-sender)) + (stacker-state (get-stacker-info tx-sender)) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + ;; if the account unlocks *during* this cycle (should only occur during testing), + ;; set first-extend-cycle to the next cycle. 
+ (first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) + (+ cur-cycle u1) unlock-in-cycle)) + ;; maintaining valid stacking-state entries requires checking + ;; whether there is an existing entry for the stacker in the state + ;; this would be the case if the stacker is extending a lockup from PoX-1 + ;; to PoX-2 + (first-reward-cycle (match (get first-reward-cycle stacker-state) + ;; if we've stacked in PoX2, then max(cur-cycle, stacker-state.first-reward-cycle) is valid + old-first-cycle (if (> cur-cycle old-first-cycle) cur-cycle old-first-cycle) + ;; otherwise, there aren't PoX2 entries until first-extend-cycle + first-extend-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! 
(add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender))) + (reward-set-indexes (match stacker-state + ;; if there's active stacker state, we need to extend the existing reward-set-indexes + old-state (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle old-state))) + (old-indexes (get reward-set-indexes old-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))) + extended-reward-set-indexes))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. +;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + (cycle-count (try! 
(if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! 
(minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` cycles +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + (stacker-state (get-stacker-info stacker)) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + ;; if the account unlocks *during* this cycle (should only occur during testing), + ;; set first-extend-cycle to the next cycle. 
+ (cur-cycle (current-pox-reward-cycle)) + (first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) + (+ cur-cycle u1) unlock-in-cycle)) + ;; update stacker record + ;; maintaining valid stacking-state entries requires checking + ;; whether there is an existing entry for the stacker in the state + ;; this would be the case if the stacker is extending a lockup from PoX-1 + ;; to PoX-2 + (first-reward-cycle (match (get first-reward-cycle stacker-state) + ;; if stacker stacked in PoX2, then max(cur-cycle, stacker-state.first-reward-cycle) is valid + old-first-cycle (if (> cur-cycle old-first-cycle) cur-cycle old-first-cycle) + ;; otherwise, there aren't PoX2 entries until first-extend-cycle + first-extend-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! 
(is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! + (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. 
+;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) + +;; How many uSTX have voted to reject PoX in a given reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-total-pox-rejection (reward-cycle uint)) + (match (map-get? 
stacking-rejection { reward-cycle: reward-cycle }) + rejected + (get amount rejected) + u0 + ) +) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 40081b3994..997a80028b 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -22,10 +22,12 @@ use std::thread; use crate::chainstate::stacks::boot::BOOT_CODE_COSTS_2_TESTNET; use crate::chainstate::stacks::boot::POX_2_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_2_TESTNET_CODE; +use crate::chainstate::stacks::boot::POX_3_MAINNET_CODE; +use crate::chainstate::stacks::boot::POX_3_TESTNET_CODE; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_NAME, + COSTS_3_NAME, POX_2_NAME, POX_3_NAME }; use crate::chainstate::stacks::db::StacksAccount; use crate::chainstate::stacks::db::StacksChainState; @@ -44,7 +46,7 @@ use crate::chainstate::stacks::TransactionVersion; use crate::chainstate::stacks::{SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction}; use crate::core::StacksEpoch; use crate::core::FIRST_STACKS_BLOCK_ID; -use crate::core::GENESIS_EPOCH; +use crate::core::{GENESIS_EPOCH, BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT}; use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::BurnchainHeaderHash; use crate::types::chainstate::SortitionId; @@ -1116,6 +1118,8 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // epoch initialization is *free*. // NOTE: this also means that cost functions won't be evaluated. 
self.cost_track.replace(LimitedCostTracker::new_free()); + + // first, upgrade the epoch self.epoch = StacksEpochId::Epoch23; self.as_transaction(|tx_conn| { // bump the epoch in the Clarity DB @@ -1130,9 +1134,118 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { tx_conn.epoch = StacksEpochId::Epoch23; }); + /////////////////// .pox-3 //////////////////////// + let mainnet = self.mainnet; + let first_block_height = self.burn_state_db.get_burn_start_height(); + let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); + let pox_reward_cycle_length = self.burn_state_db.get_pox_reward_cycle_length(); + let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); + + let pox_3_first_cycle = PoxConstants::static_block_height_to_reward_cycle( + BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT as u64, + first_block_height as u64, + pox_reward_cycle_length as u64, + ) + .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height"); + + // get tx_version & boot code account information for pox-3 contract init + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let boot_code_address = boot_code_addr(mainnet); + + let boot_code_auth = TransactionAuth::Standard( + TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { + signer: boot_code_address.bytes.clone(), + hash_mode: SinglesigHashMode::P2PKH, + key_encoding: TransactionPublicKeyEncoding::Uncompressed, + nonce: 0, + tx_fee: 0, + signature: MessageSignature::empty(), + }), + ); + + let boot_code_nonce = self.with_clarity_db_readonly(|db| { + db.get_account_nonce(&boot_code_address.clone().into()) + }); + + let boot_code_account = StacksAccount { + principal: PrincipalData::Standard(boot_code_address.into()), + nonce: boot_code_nonce, + stx_balance: STXBalance::zero(), + }; + + let pox_3_code = if mainnet { + &*POX_3_MAINNET_CODE + } else { + &*POX_3_TESTNET_CODE + }; + + let pox_3_contract_id = 
boot_code_id(POX_3_NAME, mainnet); + + let payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from(POX_3_NAME) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(pox_3_code) + .expect("FATAL: invalid boot code body"), + }, + Some(ClarityVersion::Clarity2), + ); + + let pox_3_contract_tx = + StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + + let pox_3_initialization_receipt = self.as_transaction(|tx_conn| { + // initialize with a synthetic transaction + debug!("Instantiate {} contract", &pox_3_contract_id); + let receipt = StacksChainState::process_transaction_payload( + tx_conn, + &pox_3_contract_tx, + &boot_code_account, + ASTRules::PrecheckSize, + ) + .expect("FATAL: Failed to process PoX 3 contract initialization"); + + // set burnchain params + let consts_setter = PrincipalData::from(pox_3_contract_id.clone()); + let params = vec![ + Value::UInt(first_block_height as u128), + Value::UInt(pox_prepare_length as u128), + Value::UInt(pox_reward_cycle_length as u128), + Value::UInt(pox_rejection_fraction as u128), + Value::UInt(pox_3_first_cycle as u128), + ]; + + let (_, _, _burnchain_params_events) = tx_conn + .run_contract_call( + &consts_setter, + None, + &pox_3_contract_id, + "set-burnchain-parameters", + ¶ms, + |_, _| false, + ) + .expect("Failed to set burnchain parameters in PoX-3 contract"); + + receipt + }); + + if pox_3_initialization_receipt.result != Value::okay_true() + || pox_3_initialization_receipt.post_condition_aborted + { + panic!( + "FATAL: Failure processing PoX 3 contract initialization: {:#?}", + &pox_3_initialization_receipt + ); + } + debug!("Epoch 2.3 initialized"); - (old_cost_tracker, Ok(vec![])) + (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) }) } diff --git a/src/core/mod.rs b/src/core/mod.rs index 91a42accd7..61023dc4a9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -754,11 +754,11 @@ impl 
StacksEpochExtension for StacksEpoch { start_height: first_burnchain_height + 12, end_height: first_burnchain_height + 16, block_limit: ExecutionCost { - write_length: 210210, - write_count: 210210, - read_length: 210210, - read_count: 210210, - runtime: 210210, + write_length: 220220, + write_count: 220220, + read_length: 220220, + read_count: 220220, + runtime: 220220, }, network_epoch: PEER_VERSION_EPOCH_2_2, }, @@ -767,11 +767,11 @@ impl StacksEpochExtension for StacksEpoch { start_height: first_burnchain_height + 16, end_height: STACKS_EPOCH_MAX, block_limit: ExecutionCost { - write_length: 210210, - write_count: 210210, - read_length: 210210, - read_count: 210210, - runtime: 210210, + write_length: 230230, + write_count: 230230, + read_length: 230230, + read_count: 230230, + runtime: 230230, }, network_epoch: PEER_VERSION_EPOCH_2_3, }, From c77bed7963bdb4ad78350e430b7b3aec4a91d9d8 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Fri, 28 Apr 2023 13:42:00 -0400 Subject: [PATCH 099/158] added burnstateDB method to create getter for pox_3 contract activation --- clarity/src/vm/database/clarity_db.rs | 13 +++- clarity/src/vm/database/structures.rs | 2 +- clarity/src/vm/docs/mod.rs | 8 +- clarity/src/vm/test_util/mod.rs | 8 +- src/burnchains/mod.rs | 23 ++++-- src/burnchains/tests/affirmation.rs | 60 +++++++++------ src/burnchains/tests/db.rs | 20 +++-- src/chainstate/burn/db/sortdb.rs | 2 +- .../burn/operations/leader_block_commit.rs | 9 +-- src/chainstate/coordinator/tests.rs | 75 +++++++++++-------- src/chainstate/stacks/boot/contract_tests.rs | 8 +- src/chainstate/stacks/boot/mod.rs | 5 +- src/chainstate/stacks/db/transactions.rs | 10 ++- src/clarity_vm/clarity.rs | 11 ++- src/clarity_vm/database/mod.rs | 8 ++ src/net/inv.rs | 14 ++-- src/net/mod.rs | 7 +- 17 files changed, 184 insertions(+), 99 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 3eac1510cf..71efdbb0d2 100644 --- 
a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -115,6 +115,7 @@ pub trait HeadersDB { pub trait BurnStateDB { fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; + fn get_pox_3_activation_height(&self) -> u32; /// Returns the *burnchain block height* for the `sortition_id` is associated with. fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option; @@ -203,6 +204,10 @@ impl BurnStateDB for &dyn BurnStateDB { (*self).get_v2_unlock_height() } + fn get_pox_3_activation_height(&self) -> u32 { + (*self).get_pox_3_activation_height() + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { (*self).get_burn_block_height(sortition_id) } @@ -370,11 +375,15 @@ impl BurnStateDB for NullBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 601500d372..5147a26653 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -261,7 +261,7 @@ impl ClarityDeserializable for STXBalance { } } else if bytes.len() == STXBalance::v2_and_v3_size { let version = &bytes[0]; - if version != &STXBalance::pox_2_version || version != &STXBalance::pox_3_version { + if version != &STXBalance::pox_2_version && version != &STXBalance::pox_3_version { panic!( "Bad version byte in STX Balance serialization = {}", version diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 257a61b039..9a019dbbc9 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2766,11 +2766,15 @@ mod test { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + 
u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index a1987e6985..65e0025c7a 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -207,11 +207,15 @@ impl BurnStateDB for UnitTestBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index a42e2fc0f5..35786212e7 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -31,6 +31,7 @@ use crate::chainstate::burn::operations::Error as op_error; use crate::chainstate::burn::operations::LeaderKeyRegisterOp; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::StacksPublicKey; +use crate::chainstate::stacks::boot::POX_3_NAME; use crate::core::*; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::util_lib::db::Error as db_error; @@ -314,6 +315,8 @@ pub struct PoxConstants { pub v1_unlock_height: u32, /// The auto unlock height for PoX v2 lockups during Epoch 2.2 pub v2_unlock_height: u32, + /// After this burn height, reward cycles use pox-3 for reward set data + pub pox_3_activation_height: u32, _shadow: PhantomData<()>, } @@ -328,10 +331,13 @@ impl PoxConstants { sunset_end: u64, v1_unlock_height: u32, v2_unlock_height: u32, + pox_3_activation_height: u32, ) -> PoxConstants { assert!(anchor_threshold > (prepare_length / 2)); assert!(prepare_length < reward_cycle_length); assert!(sunset_start <= sunset_end); + assert!(v2_unlock_height >= v1_unlock_height); + assert!(pox_3_activation_height >= v2_unlock_height); PoxConstants { reward_cycle_length, @@ -343,6 +349,7 @@ impl PoxConstants { sunset_end, v1_unlock_height, 
v2_unlock_height, + pox_3_activation_height, _shadow: PhantomData, } } @@ -357,14 +364,17 @@ impl PoxConstants { 5, 5000, 10000, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ) } /// Returns the PoX contract that is "active" at the given burn block height - pub fn static_active_pox_contract(v1_unlock_height: u64, burn_height: u64) -> &'static str { - if burn_height > v1_unlock_height { + pub fn static_active_pox_contract(v1_unlock_height: u64, pox_3_activation_height: u64, burn_height: u64) -> &'static str { + if burn_height > pox_3_activation_height { + POX_3_NAME + } else if burn_height > v1_unlock_height { POX_2_NAME } else { POX_1_NAME @@ -373,7 +383,7 @@ impl PoxConstants { /// Returns the PoX contract that is "active" at the given burn block height pub fn active_pox_contract(&self, burn_height: u64) -> &'static str { - Self::static_active_pox_contract(self.v1_unlock_height as u64, burn_height) + Self::static_active_pox_contract(self.v1_unlock_height as u64, self.pox_3_activation_height as u64, burn_height) } pub fn reward_slots(&self) -> u32 { @@ -401,6 +411,7 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, + BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT.try_into().expect("Epoch transition height must be <= u32::MAX"), ) } @@ -415,6 +426,7 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, + BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT.try_into().expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX } @@ -429,6 +441,7 @@ impl PoxConstants { BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, 1_000_000, 2_000_000, + 3_000_000 ) } diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs index 46d73e9d6d..d4ee58dbc9 100644 --- a/src/burnchains/tests/affirmation.rs +++ 
b/src/burnchains/tests/affirmation.rs @@ -496,8 +496,9 @@ fn test_read_prepare_phase_commits() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -572,8 +573,9 @@ fn test_parent_block_commits() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -673,8 +675,9 @@ fn test_filter_orphan_block_commits() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -743,8 +746,9 @@ fn test_filter_missed_block_commits() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -813,8 +817,9 @@ fn test_find_heaviest_block_commit() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1035,8 +1040,9 @@ fn test_find_heaviest_parent_commit_many_commits() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1297,8 +1303,9 @@ fn test_update_pox_affirmation_maps_3_forks() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1556,8 +1563,9 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - 
u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1758,8 +1766,9 @@ fn test_update_pox_affirmation_maps_absent() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2230,8 +2239,9 @@ fn test_update_pox_affirmation_maps_nothing() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2506,8 +2516,9 @@ fn test_update_pox_affirmation_fork_2_cycles() { 5, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2807,8 +2818,9 @@ fn test_update_pox_affirmation_fork_duel() { 5, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index 2aa60e2593..685e1d1b56 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -517,8 +517,9 @@ fn test_get_commit_at() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -641,8 +642,9 @@ fn test_get_set_check_anchor_block() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -735,8 +737,9 @@ fn test_update_block_descendancy() { 0, u64::MAX - 1, u64::MAX, - 
u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -863,8 +866,9 @@ fn test_update_block_descendancy_with_fork() { 0, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index d00414c7e6..31b19a2969 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -9579,7 +9579,7 @@ pub mod tests { fs::create_dir_all(path_root).unwrap(); - let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX, u32::MAX); + let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX, u32::MAX, u32::MAX); let mut burnchain = Burnchain::regtest(path_root); burnchain.pox_constants = pox_consts.clone(); diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 9330e2d707..3f66474fc2 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -734,9 +734,6 @@ impl LeaderBlockCommitOp { /// Check the epoch marker in the block commit, given the epoch we're in fn check_epoch_commit(&self, epoch_id: StacksEpochId) -> Result<(), op_error> { - info!("CHECKING EPOCH COMMIT: curr epoch: {:?}, marker: {:?}, expected for 2.2:{:?}", - epoch_id, self.memo[0], STACKS_EPOCH_2_2_MARKER - ); match epoch_id { StacksEpochId::Epoch10 => { panic!("FATAL: processed block-commit pre-Stacks 2.0"); @@ -1779,7 +1776,7 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), peer_version: 
0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2312,7 +2309,7 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -3002,7 +2999,7 @@ mod tests { .unwrap(); let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index b02cb09b0c..ba72d9c50f 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -513,10 +513,11 @@ pub fn get_burnchain(path: &str, pox_consts: Option) -> Burnchain 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, ) }); b @@ -659,7 +660,7 @@ fn make_genesis_block_with_recipients( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_3_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -922,7 +923,7 @@ fn make_stacks_block_with_input( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_3_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -953,8 +954,9 @@ fn missed_block_commits_2_05() { 5, 7010, sunset_ht, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1270,8 
+1272,9 @@ fn missed_block_commits_2_1() { 5, 7010, sunset_ht, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1611,8 +1614,9 @@ fn late_block_commits_2_1() { 5, 7010, sunset_ht, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2666,8 +2670,8 @@ fn test_pox_btc_ops() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_v1_unlock_ht = u32::max_value(); - let pox_v2_unlock_ht = u32::max_value(); + let pox_v1_unlock_ht = u32::MAX; + let pox_v2_unlock_ht = u32::MAX; let pox_consts = Some(PoxConstants::new( 5, 3, @@ -2678,6 +2682,7 @@ fn test_pox_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2947,8 +2952,8 @@ fn test_stx_transfer_btc_ops() { let path = "/tmp/stacks-blockchain-stx_transfer-btc-ops"; let _r = std::fs::remove_dir_all(path); - let pox_v1_unlock_ht = u32::max_value(); - let pox_v2_unlock_ht = u32::max_value(); + let pox_v1_unlock_ht = u32::MAX; + let pox_v2_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 5, @@ -2960,6 +2965,7 @@ fn test_stx_transfer_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3353,7 +3359,7 @@ fn test_delegate_stx_btc_ops() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; - let pox_v2_unlock_ht = u32::max_value(); + let pox_v2_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3365,6 +3371,7 @@ fn test_delegate_stx_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3667,8 +3674,9 @@ fn test_initial_coinbase_reward_distributions() { 5, 7010, sunset_ht, - 
u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3905,8 +3913,9 @@ fn test_epoch_switch_cost_contract_instantiation() { 5, 10, sunset_ht, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4106,7 +4115,8 @@ fn test_epoch_switch_pox_2_contract_instantiation() { 10, sunset_ht, 10, - u32::max_value(), + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4326,11 +4336,12 @@ fn test_epoch_switch_pox_3_contract_instantiation() { sunset_ht, 10, 14, + 16, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); - let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); + let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); + let committers: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); let stacker = p2pkh_from(&StacksPrivateKey::new()); let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -4367,7 +4378,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - for ix in 0..18 { + for ix in 0..24 { let vrf_key = &vrf_keys[ix]; let miner = &committers[ix]; @@ -4586,6 +4597,7 @@ fn test_epoch_verify_active_pox_contract() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4874,8 +4886,9 @@ fn test_sortition_with_sunset() { 5, 10, sunset_ht, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5183,7 +5196,8 @@ fn test_sortition_with_sunset_and_epoch_switch() { 10, sunset_ht, v1_unlock_ht, - u32::max_value(), + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5531,8 +5545,9 @@ fn test_pox_processable_block_in_different_pox_forks() { 5, u64::MAX - 1, u64::MAX, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); diff --git a/src/chainstate/stacks/boot/contract_tests.rs b/src/chainstate/stacks/boot/contract_tests.rs index 55be1ff722..7a538ed14d 100644 --- a/src/chainstate/stacks/boot/contract_tests.rs +++ b/src/chainstate/stacks/boot/contract_tests.rs @@ -402,11 +402,15 @@ impl BurnStateDB for TestSimBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index e2d2e48e92..6bb5014ae3 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -982,8 +982,9 @@ pub mod test { 5, 5000, 10000, - u32::max_value(), - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, ); // when the liquid amount = 
the threshold step, // the threshold should always be the step size. diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index ff5a6a4649..46dd796962 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -8301,7 +8301,10 @@ pub mod test { 2 } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) @@ -8512,7 +8515,10 @@ pub mod test { 2 } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 997a80028b..9b09af0cb6 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1140,9 +1140,10 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); let pox_reward_cycle_length = self.burn_state_db.get_pox_reward_cycle_length(); let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); + let pox_3_activation_height = self.burn_state_db.get_pox_3_activation_height(); let pox_3_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT as u64, + pox_3_activation_height as u64, first_block_height as u64, pox_reward_cycle_length as u64, ) @@ -2444,11 +2445,15 @@ mod tests { } fn get_v2_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/src/clarity_vm/database/mod.rs b/src/clarity_vm/database/mod.rs index 
73687f22bd..bb1514b4c7 100644 --- a/src/clarity_vm/database/mod.rs +++ b/src/clarity_vm/database/mod.rs @@ -460,6 +460,10 @@ impl BurnStateDB for SortitionHandleTx<'_> { self.context.pox_constants.v2_unlock_height } + fn get_pox_3_activation_height(&self) -> u32 { + self.context.pox_constants.pox_3_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } @@ -575,6 +579,10 @@ impl BurnStateDB for SortitionDBConn<'_> { self.context.pox_constants.v2_unlock_height } + fn get_pox_3_activation_height(&self) -> u32 { + self.context.pox_constants.pox_3_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } diff --git a/src/net/inv.rs b/src/net/inv.rs index 5175fd0926..008cee37a2 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -3115,10 +3115,11 @@ mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, u32::MAX, + u32::MAX, + u32::MAX ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); @@ -3143,9 +3144,10 @@ mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, u32::MAX, ); diff --git a/src/net/mod.rs b/src/net/mod.rs index afaaea3fd0..fe337f1c31 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2481,9 +2481,10 @@ pub mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, u32::MAX, ); From a3fd0863b0b7f0bb52f5a9b5d2b81bf5de592cc7 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Fri, 28 Apr 2023 14:34:18 -0400 Subject: [PATCH 100/158] fix to test --- src/chainstate/coordinator/tests.rs | 49 +++++++++-------------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index ba72d9c50f..a4daceb8f1 100644 --- 
a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -4102,7 +4102,7 @@ fn test_epoch_switch_cost_contract_instantiation() { // the test would panic when trying to re-create the pox-2 contract. #[test] fn test_epoch_switch_pox_2_contract_instantiation() { - let path = "/tmp/stacks-blockchain-epoch-switch-pox-contract-instantiation"; + let path = "/tmp/stacks-blockchain-epoch-switch-pox-2-contract-instantiation"; let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; @@ -4123,17 +4123,12 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - let initial_balances = vec![(stacker.clone().into(), balance)]; - setup_states( &[path], &vrf_keys, &committers, pox_consts.clone(), - Some(initial_balances), + None, StacksEpochId::Epoch21, ); @@ -4165,7 +4160,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let mut chainstate = get_chainstate(path); - // Want to ensure that the pox-2 contract DNE for all blocks after the epoch transition height, + // Want to ensure that the pox-2 contract DNE for all blocks before the epoch transition height, // and does exist for blocks after the boundary. // Epoch 2.1 transition // ^ @@ -4317,9 +4312,9 @@ fn test_epoch_switch_pox_2_contract_instantiation() { } } -// This test ensures the epoch transition from 2.05 to 2.1 is applied at the proper block boundaries, +// This test ensures the epoch transition from 2.2 to 2.3 is applied at the proper block boundaries, // and that the epoch transition is only applied once. 
If it were to be applied more than once, -// the test would panic when trying to re-create the pox-2 contract. +// the test would panic when trying to re-create the pox-3 contract. #[test] fn test_epoch_switch_pox_3_contract_instantiation() { let path = "/tmp/stacks-blockchain-epoch-switch-pox-3-contract-instantiation"; @@ -4343,17 +4338,12 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); let committers: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - let initial_balances = vec![(stacker.clone().into(), balance)]; - setup_states( &[path], &vrf_keys, &committers, pox_consts.clone(), - Some(initial_balances), + None, StacksEpochId::Epoch23, ); @@ -4385,15 +4375,15 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let mut chainstate = get_chainstate(path); - // Want to ensure that the pox-3 contract DNE for all blocks after the epoch transition height, + // Want to ensure that the pox-3 contract DNE for all blocks before the epoch 2.3 transition height, // and does exist for blocks after the boundary. - // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition - // ^ ^ ^ - //.. B1 -> B2 -> B3 -> B4 -> B5 -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 - // S0 -> S1 -> S2 -> S3 -> S4 -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 - // \ - // \ - // _ _ _ S15 -> S16 -> .. + // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition + // ^ ^ ^ + //.. -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 + //.. -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 + // \ + // \ + // _ _ _ S15 -> S16 -> .. 
let parent = if ix == 0 { BlockHeaderHash([0; 32]) } else if ix == 15 { @@ -4405,16 +4395,6 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); let b = get_burnchain(path, pox_consts.clone()); - let next_mock_header = BurnchainBlockHeader { - block_height: burnchain_tip.block_height + 1, - block_hash: BurnchainHeaderHash([0; 32]), - parent_block_hash: burnchain_tip.block_hash, - num_txs: 0, - timestamp: 1, - }; - - let reward_cycle_info = coord.get_reward_cycle_info(&next_mock_header).unwrap(); - let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -4444,7 +4424,6 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let expected_winner = good_op.txid(); let ops = vec![good_op]; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( &b, &mut burnchain, From 57ad7ef898543e0efa828e54e356bb242b0b482d Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Fri, 28 Apr 2023 14:47:12 -0400 Subject: [PATCH 101/158] applying cargo fmt --- src/burnchains/mod.rs | 37 +++++++++--------- src/chainstate/burn/db/sortdb.rs | 13 ++++++- .../burn/operations/leader_block_commit.rs | 39 +++++++++++++++++-- src/chainstate/coordinator/tests.rs | 26 +------------ src/chainstate/stacks/boot/mod.rs | 14 +------ src/clarity_vm/clarity.rs | 5 +-- src/net/inv.rs | 2 +- 7 files changed, 74 insertions(+), 62 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 35786212e7..59e5a78e62 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -30,8 +30,8 @@ use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::burn::operations::Error as op_error; use crate::chainstate::burn::operations::LeaderKeyRegisterOp; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::StacksPublicKey; use crate::chainstate::stacks::boot::POX_3_NAME; +use 
crate::chainstate::stacks::StacksPublicKey; use crate::core::*; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::util_lib::db::Error as db_error; @@ -356,22 +356,15 @@ impl PoxConstants { #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new( - 10, - 5, - 3, - 25, - 5, - 5000, - 10000, - u32::MAX, - u32::MAX, - u32::MAX, - ) + PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX) } /// Returns the PoX contract that is "active" at the given burn block height - pub fn static_active_pox_contract(v1_unlock_height: u64, pox_3_activation_height: u64, burn_height: u64) -> &'static str { + pub fn static_active_pox_contract( + v1_unlock_height: u64, + pox_3_activation_height: u64, + burn_height: u64, + ) -> &'static str { if burn_height > pox_3_activation_height { POX_3_NAME } else if burn_height > v1_unlock_height { @@ -383,7 +376,11 @@ impl PoxConstants { /// Returns the PoX contract that is "active" at the given burn block height pub fn active_pox_contract(&self, burn_height: u64) -> &'static str { - Self::static_active_pox_contract(self.v1_unlock_height as u64, self.pox_3_activation_height as u64, burn_height) + Self::static_active_pox_contract( + self.v1_unlock_height as u64, + self.pox_3_activation_height as u64, + burn_height, + ) } pub fn reward_slots(&self) -> u32 { @@ -411,7 +408,9 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, - BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT.try_into().expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) } @@ -426,7 +425,9 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, - 
BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT.try_into().expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX } @@ -441,7 +442,7 @@ impl PoxConstants { BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, 1_000_000, 2_000_000, - 3_000_000 + 3_000_000, ) } diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 31b19a2969..59b39240e7 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -9579,7 +9579,18 @@ pub mod tests { fs::create_dir_all(path_root).unwrap(); - let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX, u32::MAX, u32::MAX); + let pox_consts = PoxConstants::new( + 10, + 3, + 3, + 25, + 5, + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); let mut burnchain = Burnchain::regtest(path_root); burnchain.pox_constants = pox_consts.clone(); diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 3f66474fc2..b38ebff08f 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -1776,7 +1776,18 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2309,7 +2320,18 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), peer_version: 0x012345678, network_id: 0x9abcdef0, 
chain_name: "bitcoin".to_string(), @@ -2999,7 +3021,18 @@ mod tests { .unwrap(); let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index a4daceb8f1..036ffdac3e 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -39,9 +39,9 @@ use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::coordinator::{Error as CoordError, *}; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::{POX_3_NAME, PoxStartCycleInfo}; use crate::chainstate::stacks::boot::POX_1_NAME; use crate::chainstate::stacks::boot::POX_2_NAME; +use crate::chainstate::stacks::boot::{PoxStartCycleInfo, POX_3_NAME}; use crate::chainstate::stacks::db::{ accounts::MinerReward, ClarityTx, StacksChainState, StacksHeaderInfo, }; @@ -4180,16 +4180,6 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); let b = get_burnchain(path, pox_consts.clone()); - let next_mock_header = BurnchainBlockHeader { - block_height: burnchain_tip.block_height + 1, - block_hash: BurnchainHeaderHash([0; 32]), - parent_block_hash: burnchain_tip.block_hash, - num_txs: 0, - timestamp: 1, - }; - - let reward_cycle_info = coord.get_reward_cycle_info(&next_mock_header).unwrap(); - let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -4219,7 +4209,6 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let expected_winner = good_op.txid(); let ops = vec![good_op]; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( &b, &mut 
burnchain, @@ -4321,18 +4310,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new( - 6, - 3, - 3, - 25, - 5, - 10, - sunset_ht, - 10, - 14, - 16, - )); + let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, 16)); let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 6bb5014ae3..03d0bd81a6 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -974,18 +974,8 @@ pub mod test { #[test] fn get_reward_threshold_units() { - let test_pox_constants = PoxConstants::new( - 501, - 1, - 1, - 1, - 5, - 5000, - 10000, - u32::MAX, - u32::MAX, - u32::MAX, - ); + let test_pox_constants = + PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX); // when the liquid amount = the threshold step, // the threshold should always be the step size. 
let liquid = POX_THRESHOLD_STEPS_USTX; diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 9b09af0cb6..0405fe2ada 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -27,7 +27,7 @@ use crate::chainstate::stacks::boot::POX_3_TESTNET_CODE; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_NAME, POX_3_NAME + COSTS_3_NAME, POX_2_NAME, POX_3_NAME, }; use crate::chainstate::stacks::db::StacksAccount; use crate::chainstate::stacks::db::StacksChainState; @@ -45,8 +45,7 @@ use crate::chainstate::stacks::TransactionSpendingCondition; use crate::chainstate::stacks::TransactionVersion; use crate::chainstate::stacks::{SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction}; use crate::core::StacksEpoch; -use crate::core::FIRST_STACKS_BLOCK_ID; -use crate::core::{GENESIS_EPOCH, BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT}; +use crate::core::{FIRST_STACKS_BLOCK_ID, GENESIS_EPOCH}; use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::BurnchainHeaderHash; use crate::types::chainstate::SortitionId; diff --git a/src/net/inv.rs b/src/net/inv.rs index 008cee37a2..3f1f39d812 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -3119,7 +3119,7 @@ mod test { u64::MAX, u32::MAX, u32::MAX, - u32::MAX + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); From 539caa7f57aff0760c37fb0600081fcead767264 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Mon, 1 May 2023 09:42:56 -0400 Subject: [PATCH 102/158] adjust first pox 3 cycle --- src/clarity_vm/clarity.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 0405fe2ada..e944259f55 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1146,7 +1146,8 @@ impl<'a, 'b> 
ClarityBlockConnection<'a, 'b> { first_block_height as u64, pox_reward_cycle_length as u64, ) - .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height"); + .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height") + + 1; // get tx_version & boot code account information for pox-3 contract init let tx_version = if mainnet { From 2a68a0ff32d5d765dbb42c94a5e48eb572d45c66 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Wed, 3 May 2023 10:29:16 -0400 Subject: [PATCH 103/158] made changes needed for epoch 2.4 --- src/burnchains/mod.rs | 4 +-- src/clarity_vm/clarity.rs | 56 +++++++++++++++++++-------------------- src/core/mod.rs | 1 + 3 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 59e5a78e62..549b060990 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -408,7 +408,7 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, - BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT + BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), ) @@ -425,7 +425,7 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, - BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT + BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index e944259f55..f3c572c019 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1133,7 +1133,34 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { tx_conn.epoch = StacksEpochId::Epoch23; }); - /////////////////// .pox-3 //////////////////////// + debug!("Epoch 2.3 initialized"); + + (old_cost_tracker, 
Ok(vec![pox_3_initialization_receipt])) + }) + } + + pub fn initialize_epoch_2_4(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch24; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch24); + Ok(()) + }) + .unwrap(); + + // require 2.4 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch24; + }); + + /////////////////// .pox-3 //////////////////////// let mainnet = self.mainnet; let first_block_height = self.burn_state_db.get_burn_start_height(); let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); @@ -1244,33 +1271,6 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { ); } - debug!("Epoch 2.3 initialized"); - - (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) - }) - } - - pub fn initialize_epoch_2_4(&mut self) -> Result, Error> { - // use the `using!` statement to ensure that the old cost_tracker is placed - // back in all branches after initialization - using!(self.cost_track, "cost tracker", |old_cost_tracker| { - // epoch initialization is *free*. - // NOTE: this also means that cost functions won't be evaluated. 
- self.cost_track.replace(LimitedCostTracker::new_free()); - self.epoch = StacksEpochId::Epoch24; - self.as_transaction(|tx_conn| { - // bump the epoch in the Clarity DB - tx_conn - .with_clarity_db(|db| { - db.set_clarity_epoch_version(StacksEpochId::Epoch24); - Ok(()) - }) - .unwrap(); - - // require 2.4 rules henceforth in this connection as well - tx_conn.epoch = StacksEpochId::Epoch24; - }); - debug!("Epoch 2.4 initialized"); (old_cost_tracker, Ok(vec![])) diff --git a/src/core/mod.rs b/src/core/mod.rs index 61023dc4a9..9967b01de6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -129,6 +129,7 @@ pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; +pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_433_033; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; From b87a5ac4226645f9fcdcd96b91a296a0d581f3a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 3 May 2023 13:07:17 -0500 Subject: [PATCH 104/158] fix test build errors, add receipt --- src/clarity_vm/clarity.rs | 6 +++--- testnet/stacks-node/src/tests/epoch_21.rs | 15 +++++++++++++++ testnet/stacks-node/src/tests/epoch_22.rs | 6 +++--- testnet/stacks-node/src/tests/epoch_23.rs | 1 + .../stacks-node/src/tests/neon_integrations.rs | 3 +++ 5 files changed, 25 insertions(+), 6 deletions(-) diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index f3c572c019..0e50f21a76 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -1135,7 +1135,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { debug!("Epoch 2.3 initialized"); - (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) + (old_cost_tracker, Ok(vec![])) }) } @@ -1160,7 +1160,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { 
tx_conn.epoch = StacksEpochId::Epoch24; }); - /////////////////// .pox-3 //////////////////////// + /////////////////// .pox-3 //////////////////////// let mainnet = self.mainnet; let first_block_height = self.burn_state_db.get_burn_start_height(); let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); @@ -1273,7 +1273,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { debug!("Epoch 2.4 initialized"); - (old_cost_tracker, Ok(vec![])) + (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) }) } diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index a996fda07a..4243835a66 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -126,6 +126,7 @@ fn advance_to_2_1( u64::max_value() - 1, u32::max_value(), u32::MAX, + u32::MAX, )); burnchain_config.pox_constants = pox_constants.clone(); @@ -621,6 +622,7 @@ fn transition_fixes_bitcoin_rigidity() { (17 * reward_cycle_len).into(), u32::max_value(), u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1064,6 +1066,7 @@ fn transition_adds_get_pox_addr_recipients() { u64::max_value() - 1, v1_unlock_height, u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1366,6 +1369,7 @@ fn transition_adds_mining_from_segwit() { u64::MAX, v1_unlock_height, u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1530,6 +1534,7 @@ fn transition_removes_pox_sunset() { (sunset_end_rc * reward_cycle_len).into(), (epoch_21 as u32) + 1, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1811,6 +1816,7 @@ fn transition_empty_blocks() { u64::max_value() - 1, (epoch_2_1 + 1) as u32, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2169,6 +2175,7 @@ fn test_pox_reorgs_three_flaps() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ 
-2705,6 +2712,7 @@ fn test_pox_reorg_one_flap() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3129,6 +3137,7 @@ fn test_pox_reorg_flap_duel() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3563,6 +3572,7 @@ fn test_pox_reorg_flap_reward_cycles() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3991,6 +4001,7 @@ fn test_pox_missing_five_anchor_blocks() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4391,6 +4402,7 @@ fn test_sortition_divergence_pre_21() { (1700 * reward_cycle_len).into(), v1_unlock_height, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4754,6 +4766,7 @@ fn trait_invocation_cross_epoch() { (17 * reward_cycle_len).into(), u32::max_value(), u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4999,6 +5012,7 @@ fn test_v1_unlock_height_with_current_stackers() { u64::max_value() - 1, v1_unlock_height as u32, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5260,6 +5274,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { u64::max_value() - 1, v1_unlock_height as u32, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 8f4375cd3e..d0e89244b9 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -1,7 +1,6 @@ use std::collections::HashMap; use std::env; use std::thread; -use std::time::Duration; use stacks::burnchains::Burnchain; use stacks::chainstate::stacks::address::PoxAddress; @@ -18,7 +17,6 
@@ use crate::config::EventObserverConfig; use crate::config::InitialBalance; use crate::neon; use crate::neon_node::StacksNode; -use crate::node::get_account_balances; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::epoch_21::wait_pox_stragglers; use crate::tests::neon_integrations::*; @@ -186,6 +184,7 @@ fn disable_pox() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -612,7 +611,6 @@ fn pox_2_unlock_all() { let epoch_2_2 = 239; // one block before a prepare phase let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -720,6 +718,7 @@ fn pox_2_unlock_all() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1414,6 +1413,7 @@ fn test_pox_reorg_one_flap() { (1700 * reward_cycle_len).into(), v1_unlock_height, v2_unlock_height.try_into().unwrap(), + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index a04893f7d3..130b094b05 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -152,6 +152,7 @@ fn trait_invocation_behavior() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5bb05bacca..4c0513fc81 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1801,6 +1801,7 @@ fn stx_delegate_btc_integration_test() { (17 
* reward_cycle_len).into(), u32::MAX, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5828,6 +5829,7 @@ fn pox_integration_test() { (17 * reward_cycle_len).into(), u32::MAX, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -10466,6 +10468,7 @@ fn test_competing_miners_build_on_same_chain( (17 * reward_cycle_len).into(), u32::MAX, u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); From 0b6c8c696df3622d58b7ee340f623a661fb837d2 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Wed, 3 May 2023 16:02:27 -0400 Subject: [PATCH 105/158] fixed test --- src/chainstate/coordinator/tests.rs | 37 ++++++++++++++--------------- src/core/mod.rs | 2 +- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 036ffdac3e..81f4efa899 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -660,7 +660,7 @@ fn make_genesis_block_with_recipients( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_3_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -923,7 +923,7 @@ fn make_stacks_block_with_input( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_3_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -4301,7 +4301,7 @@ fn test_epoch_switch_pox_2_contract_instantiation() { } } -// This test ensures the epoch transition from 2.2 to 2.3 is applied at the proper block boundaries, +// This test ensures the epoch transition from 2.3 to 2.4 is applied at the proper block boundaries, // and that the epoch transition is only applied once. 
If it were to be applied more than once, // the test would panic when trying to re-create the pox-3 contract. #[test] @@ -4322,7 +4322,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { &committers, pox_consts.clone(), None, - StacksEpochId::Epoch23, + StacksEpochId::Epoch24, ); let mut coord = make_coordinator(path, Some(burnchain_conf)); @@ -4353,15 +4353,15 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let mut chainstate = get_chainstate(path); - // Want to ensure that the pox-3 contract DNE for all blocks before the epoch 2.3 transition height, + // Want to ensure that the pox-3 contract DNE for all blocks before the epoch 2.4 transition height, // and does exist for blocks after the boundary. - // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition - // ^ ^ ^ - //.. -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 - //.. -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 - // \ - // \ - // _ _ _ S15 -> S16 -> .. + // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition Epoch 2.4 transition + // ^ ^ ^ ^ + //.. -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 -> B16 -> B17 -> B18 -> B19 + //.. -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 -> S15 -> S16 -> S17 -> S18 + // \ + // \ + // _ _ _ S19 -> S20 -> .. 
let parent = if ix == 0 { BlockHeaderHash([0; 32]) } else if ix == 15 { @@ -4435,7 +4435,8 @@ fn test_epoch_switch_pox_3_contract_instantiation() { x if x >= 4 && x < 8 => StacksEpochId::Epoch2_05, x if x >= 8 && x < 12 => StacksEpochId::Epoch21, x if x >= 12 && x < 16 => StacksEpochId::Epoch22, - _ => StacksEpochId::Epoch23, + x if x >= 16 && x < 20 => StacksEpochId::Epoch23, + _ => StacksEpochId::Epoch24, }; assert_eq!( chainstate @@ -4452,13 +4453,11 @@ fn test_epoch_switch_pox_3_contract_instantiation() { ); // These expectations are according to according to hard-coded values in - // `StacksEpoch::unit_test_2_3`. + // `StacksEpoch::unit_test_2_4`. let expected_runtime = match burn_block_height { x if x < 4 => u64::MAX, x if x >= 4 && x < 8 => 205205, - x if x >= 8 && x < 12 => 210210, - x if x >= 12 && x < 16 => 220220, - x => 230230, + x => 210210 }; assert_eq!( chainstate @@ -4477,7 +4476,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { expected_runtime ); - // check that pox-3 contract DNE before epoch 2.3, and that it does exist after + // check that pox-3 contract DNE before epoch 2.4, and that it does exist after let does_pox_3_contract_exist = chainstate .with_read_only_clarity_tx( &sort_db.index_conn(), @@ -4490,7 +4489,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { ) .unwrap(); - if burn_block_height < 16 { + if burn_block_height < 20 { assert!(does_pox_3_contract_exist.is_err()) } else { assert!(does_pox_3_contract_exist.is_ok()) diff --git a/src/core/mod.rs b/src/core/mod.rs index 9967b01de6..04c498fa99 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -782,7 +782,7 @@ impl StacksEpochExtension for StacksEpoch { #[cfg(test)] fn unit_test_2_4(first_burnchain_height: u64) -> Vec { info!( - "StacksEpoch unit_test_2_3 first_burn_height = {}", + "StacksEpoch unit_test_2_4 first_burn_height = {}", first_burnchain_height ); From 7d55801c8ad264f7bf1de9e2c5aa9c44e73a5ffe Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: 
Mon, 1 May 2023 14:22:19 -0400 Subject: [PATCH 106/158] added special handlers --- clarity/src/vm/database/clarity_db.rs | 5 + clarity/src/vm/database/structures.rs | 118 ++++++++++ src/chainstate/stacks/db/accounts.rs | 123 +++++++++++ src/clarity_vm/special.rs | 297 ++++++++++++++++++++++++-- 4 files changed, 527 insertions(+), 16 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 71efdbb0d2..8c4a61db74 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -778,6 +778,11 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_v1_unlock_height() } + /// Return the height for PoX 3 activation from the burn state db + pub fn get_pox_3_activation_height(&self) -> u32 { + self.burn_state_db.get_pox_3_activation_height() + } + /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn state db pub fn get_v2_unlock_height(&mut self) -> u32 { diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 5147a26653..b81f13781d 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -454,6 +454,8 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { }; } + ////////////// Pox-2 ///////////////// + /// Return true iff `self` represents a snapshot that has a lock /// created by PoX v2. pub fn is_v2_locked(&mut self) -> bool { @@ -566,6 +568,122 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { }; } + //////////////// Pox-3 ////////////////// + + /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxThree" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn lock_tokens_v3(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after account-token-lock"); + } + + // caller needs to have checked this + assert!(amount_to_lock > 0, "BUG: cannot lock 0 tokens"); + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + if self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account already has locked tokens"); + } + + // from `unlock_available_tokens_if_any` call above, `self.balance` should + // be canonicalized already + + let new_amount_unlocked = self + .balance + .get_total_balance() + .checked_sub(amount_to_lock) + .expect("STX underflow"); + + self.balance = STXBalance::LockedPoxThree { + amount_unlocked: new_amount_unlocked, + amount_locked: amount_to_lock, + unlock_height: unlock_burn_height, + }; + } + + /// Extend this account's current lock to `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxThree" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn extend_lock_v3(&mut self, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + self.balance = STXBalance::LockedPoxThree { + amount_unlocked: self.balance.amount_unlocked(), + amount_locked: self.balance.amount_locked(), + unlock_height: unlock_burn_height, + }; + } + + /// Increase the account's current lock to `new_total_locked`. + /// Panics if `self` was not locked by V3 PoX. + pub fn increase_lock_v3(&mut self, new_total_locked: u128) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if !self.is_v3_locked() { + // caller needs to have checked this + panic!("FATAL: account must be locked by pox-3"); + } + + assert!( + self.balance.amount_locked() <= new_total_locked, + "FATAL: account must lock more after `increase_lock_v3`" + ); + + let total_amount = self + .balance + .amount_unlocked() + .checked_add(self.balance.amount_locked()) + .expect("STX balance overflowed u128"); + let amount_unlocked = total_amount + .checked_sub(new_total_locked) + .expect("STX underflow: more is locked than total balance"); + + self.balance = STXBalance::LockedPoxTwo { + amount_unlocked, + amount_locked: new_total_locked, + unlock_height: self.balance.unlock_height(), + }; + } + + /// Return true 
iff `self` represents a snapshot that has a lock + /// created by PoX v3. + pub fn is_v3_locked(&mut self) -> bool { + match self.canonical_balance_repr() { + STXBalance::LockedPoxThree { .. } => true, + _ => false, + } + } + + /////////////// GENERAL ////////////////////// + /// If this snapshot is locked, then alter the lock height to be /// the next burn block (i.e., `self.burn_block_height + 1`) pub fn accelerate_unlock(&mut self) { diff --git a/src/chainstate/stacks/db/accounts.rs b/src/chainstate/stacks/db/accounts.rs index 1861b77333..cf65dcf705 100644 --- a/src/chainstate/stacks/db/accounts.rs +++ b/src/chainstate/stacks/db/accounts.rs @@ -392,6 +392,127 @@ impl StacksChainState { .expect("FATAL: failed to set account nonce") } + /////////////////////// PoX-3 ///////////////////////////////// + + /// Lock up STX for PoX for a time. Does NOT touch the account nonce. + pub fn pox_lock_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + lock_amount: u128, + unlock_burn_height: u64, + ) -> Result<(), Error> { + assert!(unlock_burn_height > 0); + assert!(lock_amount > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if snapshot.has_locked_tokens() { + return Err(Error::PoxAlreadyLocked); + } + if !snapshot.can_transfer(lock_amount) { + return Err(Error::PoxInsufficientBalance); + } + snapshot.lock_tokens_v3(lock_amount, unlock_burn_height); + + debug!( + "PoX v3 lock applied"; + "pox_locked_ustx" => snapshot.balance().amount_locked(), + "available_ustx" => snapshot.balance().amount_unlocked(), + "unlock_burn_height" => unlock_burn_height, + "account" => %principal, + ); + + snapshot.save(); + Ok(()) + } + + /// Extend a STX lock up for PoX for a time. Does NOT touch the account nonce. + /// Returns Ok(lock_amount) when successful + /// + /// # Errors + /// - Returns Error::PoxExtendNotLocked if this function was called on an account + /// which isn't locked. 
This *should* have been checked by the PoX v3 contract, + /// so this should surface in a panic. + pub fn pox_lock_extend_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + unlock_burn_height: u64, + ) -> Result { + assert!(unlock_burn_height > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if !snapshot.has_locked_tokens() { + return Err(Error::PoxExtendNotLocked); + } + + snapshot.extend_lock_v3(unlock_burn_height); + + let amount_locked = snapshot.balance().amount_locked(); + + debug!( + "PoX v3 lock applied"; + "pox_locked_ustx" => amount_locked, + "available_ustx" => snapshot.balance().amount_unlocked(), + "unlock_burn_height" => unlock_burn_height, + "account" => %principal, + ); + + snapshot.save(); + Ok(amount_locked) + } + + /// Increase a STX lock up for PoX-3. Does NOT touch the account nonce. + /// Returns Ok( account snapshot ) when successful + /// + /// # Errors + /// - Returns Error::PoxExtendNotLocked if this function was called on an account + /// which isn't locked. This *should* have been checked by the PoX v3 contract, + /// so this should surface in a panic. 
+ pub fn pox_lock_increase_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + new_total_locked: u128, + ) -> Result { + assert!(new_total_locked > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if !snapshot.has_locked_tokens() { + return Err(Error::PoxExtendNotLocked); + } + + let bal = snapshot.canonical_balance_repr(); + let total_amount = bal + .amount_unlocked() + .checked_add(bal.amount_locked()) + .expect("STX balance overflowed u128"); + if total_amount < new_total_locked { + return Err(Error::PoxInsufficientBalance); + } + + if bal.amount_locked() > new_total_locked { + return Err(Error::PoxInvalidIncrease); + } + + snapshot.increase_lock_v3(new_total_locked); + + let out_balance = snapshot.canonical_balance_repr(); + + debug!( + "PoX v3 lock increased"; + "pox_locked_ustx" => out_balance.amount_locked(), + "available_ustx" => out_balance.amount_unlocked(), + "unlock_burn_height" => out_balance.unlock_height(), + "account" => %principal, + ); + + snapshot.save(); + Ok(out_balance) + } + + /////////////////////// PoX-2 ///////////////////////////////// + /// Increase a STX lock up for PoX. Does NOT touch the account nonce. /// Returns Ok( account snapshot ) when successful /// @@ -513,6 +634,8 @@ impl StacksChainState { Ok(()) } + /////////////////////// PoX (first version) ///////////////////////////////// + /// Lock up STX for PoX for a time. Does NOT touch the account nonce. 
pub fn pox_lock_v1( db: &mut ClarityDatabase, diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index 4c88dcb98a..abb85a02cc 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -21,8 +21,7 @@ use clarity::vm::{ast, eval_all}; use std::cmp; use std::convert::{TryFrom, TryInto}; -use crate::chainstate::stacks::boot::POX_1_NAME; -use crate::chainstate::stacks::boot::POX_2_NAME; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainstateError; use crate::chainstate::stacks::StacksMicroblockHeader; @@ -149,7 +148,7 @@ fn parse_pox_extend_result(result: &Value) -> std::result::Result<(PrincipalData } } -/// Parse the returned value from PoX2 `stack-increase` function +/// Parse the returned value from PoX2 or PoX3 `stack-increase` function /// into a format more readily digestible in rust. /// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_increase(result: &Value) -> std::result::Result<(PrincipalData, u128), i128> { @@ -565,10 +564,10 @@ fn create_event_info_data_code(function_name: &str, args: &[Value]) -> String { } } -/// Synthesize an events data tuple to return on the successful execution of a pox-2 stacking +/// Synthesize an events data tuple to return on the successful execution of a pox-2 or pox-3 stacking /// function. It runs a series of Clarity queries against the PoX contract's data space (including /// calling PoX functions). 
-fn synthesize_pox_2_event_info( +fn synthesize_pox_2_or_3_event_info( global_context: &mut GlobalContext, contract_id: &QualifiedContractIdentifier, sender_opt: Option<&PrincipalData>, @@ -608,7 +607,7 @@ fn synthesize_pox_2_event_info( let pox_2_contract = global_context .database .get_contract(contract_id) - .expect("FATAL: could not load PoX-2 contract metadata"); + .expect("FATAL: could not load PoX contract metadata"); let event_info = global_context .special_cc_handler_execute_read_only( @@ -657,12 +656,12 @@ fn synthesize_pox_2_event_info( }, ) .map_err(|e: ChainstateError| { - error!("Failed to synthesize PoX-2 event: {:?}", &e); + error!("Failed to synthesize PoX event: {:?}", &e); e })?; test_debug!( - "Synthesized PoX-2 event info for '{}''s call to '{}': {:?}", + "Synthesized PoX event info for '{}''s call to '{}': {:?}", sender, function_name, &event_info @@ -674,7 +673,7 @@ fn synthesize_pox_2_event_info( } /// Handle responses from stack-stx and delegate-stack-stx -- functions that *lock up* STX -fn handle_stack_lockup( +fn handle_stack_lockup_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -735,7 +734,7 @@ fn handle_stack_lockup( /// Handle responses from stack-extend and delegate-stack-extend -- functions that *extend /// already-locked* STX. -fn handle_stack_lockup_extension( +fn handle_stack_lockup_extension_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -795,9 +794,9 @@ fn handle_stack_lockup_extension( } } -/// Handle resposnes from stack-increase and delegate-stack-increase -- functions that *increase +/// Handle responses from stack-increase and delegate-stack-increase -- functions that *increase /// already-locked* STX amounts. -fn handle_stack_lockup_increase( +fn handle_stack_lockup_increase_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -871,7 +870,7 @@ fn handle_pox_v2_api_contract_call( // for some reason. 
// Failure to synthesize an event due to a bug is *NOT* an excuse to crash the whole // network! Event capture is not consensus-critical. - let event_info_opt = match synthesize_pox_2_event_info( + let event_info_opt = match synthesize_pox_2_or_3_event_info( global_context, contract_id, sender_opt, @@ -903,11 +902,265 @@ fn handle_pox_v2_api_contract_call( // Execute function specific logic to complete the lock-up let lock_event_opt = if function_name == "stack-stx" || function_name == "delegate-stack-stx" { - handle_stack_lockup(global_context, function_name, value)? + handle_stack_lockup_pox_v2(global_context, function_name, value)? } else if function_name == "stack-extend" || function_name == "delegate-stack-extend" { - handle_stack_lockup_extension(global_context, function_name, value)? + handle_stack_lockup_extension_pox_v2(global_context, function_name, value)? } else if function_name == "stack-increase" || function_name == "delegate-stack-increase" { - handle_stack_lockup_increase(global_context, function_name, value)? + handle_stack_lockup_increase_pox_v2(global_context, function_name, value)? 
+ } else { + None + }; + + // append the lockup event, so it looks as if the print event happened before the lock-up + if let Some(batch) = global_context.event_batches.last_mut() { + if let Some(print_event) = print_event_opt { + batch.events.push(print_event); + } + if let Some(lock_event) = lock_event_opt { + batch.events.push(lock_event); + } + } + + Ok(()) +} + +/////////////// PoX-3 ////////////////////////////////////////// + +/// Handle responses from stack-stx and delegate-stack-stx in pox-3 -- functions that *lock up* STX +fn handle_stack_lockup_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result> { + debug!( + "Handle special-case contract-call to {:?} {} (which returned {:?})", + boot_code_id(POX_3_NAME, global_context.mainnet), + function_name, + value + ); + // applying a pox lock at this point is equivalent to evaluating a transfer + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + match parse_pox_stacking_result(value) { + Ok((stacker, locked_amount, unlock_height)) => { + match StacksChainState::pox_lock_v3( + &mut global_context.database, + &stacker, + locked_amount, + unlock_height as u64, + ) { + Ok(_) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount, + unlock_height, + locked_address: stacker, + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), + }, + )); + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); + } + Err(ChainstateError::PoxAlreadyLocked) => { + // the caller tried to lock tokens into multiple pox contracts + return Err(Error::Runtime(RuntimeErrorType::PoxAlreadyLocked, None)); + } + Err(e) => { + panic!( + "FATAL: failed to lock {} from {} until {}: '{:?}'", + locked_amount, stacker, unlock_height, &e + ); + } + } + } + Err(_) => { + // nothing to do 
-- the function failed + return Ok(None); + } + } +} + +/// Handle responses from stack-extend and delegate-stack-extend in pox-3 -- functions that *extend +/// already-locked* STX. +fn handle_stack_lockup_extension_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result> { + // in this branch case, the PoX-3 contract has stored the extension information + // and performed the extension checks. Now, the VM needs to update the account locks + // (because the locks cannot be applied directly from the Clarity code itself) + // applying a pox lock at this point is equivalent to evaluating a transfer + debug!( + "Handle special-case contract-call to {:?} {} (which returned {:?})", + boot_code_id("pox-3", global_context.mainnet), + function_name, + value + ); + + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + if let Ok((stacker, unlock_height)) = parse_pox_extend_result(value) { + match StacksChainState::pox_lock_extend_v3( + &mut global_context.database, + &stacker, + unlock_height as u64, + ) { + Ok(locked_amount) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount, + unlock_height, + locked_address: stacker, + contract_identifier: boot_code_id("pox-3", global_context.mainnet), + }, + )); + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)) + } + Err(e) => { + // Error results *other* than a DefunctPoxContract panic, because + // those errors should have been caught by the PoX contract before + // getting to this code path. 
+ panic!( + "FATAL: failed to extend lock from {} until {}: '{:?}'", + stacker, unlock_height, &e + ); + } + } + } else { + // The stack-extend function returned an error: we do not need to apply a lock + // in this case, and can just return and let the normal VM codepath surface the + // error response type. + return Ok(None); + } +} + +/// Handle responses from stack-increase and delegate-stack-increase in PoX-3 -- functions +/// that *increase already-locked* STX amounts. +fn handle_stack_lockup_increase_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result> { + // in this branch case, the PoX-3 contract has stored the increase information + // and performed the increase checks. Now, the VM needs to update the account locks + // (because the locks cannot be applied directly from the Clarity code itself) + // applying a pox lock at this point is equivalent to evaluating a transfer + debug!( + "Handle special-case contract-call"; + "contract" => ?boot_code_id("pox-3", global_context.mainnet), + "function" => function_name, + "return-value" => %value, + ); + + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + if let Ok((stacker, total_locked)) = parse_pox_increase(value) { + match StacksChainState::pox_lock_increase_v3( + &mut global_context.database, + &stacker, + total_locked, + ) { + Ok(new_balance) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount: new_balance.amount_locked(), + unlock_height: new_balance.unlock_height(), + locked_address: stacker, + contract_identifier: boot_code_id("pox-3", global_context.mainnet), + }, + )); + + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)) + } + Err(e) => { + // Error results *other* than a DefunctPoxContract panic, because + // those errors should have been caught 
by the PoX contract before + // getting to this code path. + panic!( + "FATAL: failed to increase lock from {}: '{:?}'", + stacker, &e + ); + } + } + } else { + Ok(None) + } +} + +/// Handle special cases when calling into the PoX-3 API contract +fn handle_pox_v3_api_contract_call( + global_context: &mut GlobalContext, + sender_opt: Option<&PrincipalData>, + contract_id: &QualifiedContractIdentifier, + function_name: &str, + args: &[Value], + value: &Value, +) -> Result<()> { + // Generate a synthetic print event for all functions that alter stacking state + let print_event_opt = if let Value::Response(response) = value { + if response.committed { + // method succeeded. Synthesize event info, but default to no event report if we fail + // for some reason. + // Failure to synthesize an event due to a bug is *NOT* an excuse to crash the whole + // network! Event capture is not consensus-critical. + let event_info_opt = match synthesize_pox_2_or_3_event_info( + global_context, + contract_id, + sender_opt, + function_name, + args, + ) { + Ok(Some(event_info)) => Some(event_info), + Ok(None) => None, + Err(e) => { + error!("Failed to synthesize PoX-3 event info: {:?}", &e); + None + } + }; + if let Some(event_info) = event_info_opt { + let event_response = + Value::okay(event_info).expect("FATAL: failed to construct (ok event-info)"); + let tx_event = + Environment::construct_print_transaction_event(contract_id, &event_response); + Some(tx_event) + } else { + None + } + } else { + None + } + } else { + None + }; + + // Execute function specific logic to complete the lock-up + let lock_event_opt = if function_name == "stack-stx" || function_name == "delegate-stack-stx" { + handle_stack_lockup_pox_v3(global_context, function_name, value)? + } else if function_name == "stack-extend" || function_name == "delegate-stack-extend" { + handle_stack_lockup_extension_pox_v3(global_context, function_name, value)? 
+ } else if function_name == "stack-increase" || function_name == "delegate-stack-increase" { + handle_stack_lockup_increase_pox_v3(global_context, function_name, value)? } else { None }; @@ -1003,6 +1256,18 @@ pub fn handle_contract_call_special_cases( return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); } + return handle_pox_v2_api_contract_call( + global_context, + sender, + contract_id, + function_name, + args, + result, + ); + } else if *contract_id == boot_code_id(POX_3_NAME, global_context.mainnet) + && global_context.database.get_pox_3_activation_height() + >= global_context.database.get_current_burnchain_block_height() + { return handle_pox_v2_api_contract_call( global_context, sender, From 18805f742917998ad870a95fe87f9c21ac06c415 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Tue, 2 May 2023 09:53:57 -0400 Subject: [PATCH 107/158] used constant --- src/clarity_vm/special.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index abb85a02cc..70d0c94969 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -48,7 +48,7 @@ use stacks_common::util::hash::Hash160; use crate::vm::costs::runtime_cost; /// Parse the returned value from PoX `stack-stx` and `delegate-stack-stx` functions -/// from pox-2.clar into a format more readily digestible in rust. +/// from pox-2.clar or pox-3.clar into a format more readily digestible in rust. /// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_stacking_result( result: &Value, @@ -119,7 +119,7 @@ fn parse_pox_stacking_result_v1( } } -/// Parse the returned value from PoX2 `stack-extend` and `delegate-stack-extend` functions +/// Parse the returned value from PoX2 or PoX3 `stack-extend` and `delegate-stack-extend` functions /// into a format more readily digestible in rust. 
/// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_extend_result(result: &Value) -> std::result::Result<(PrincipalData, u64), i128> { @@ -1022,7 +1022,7 @@ fn handle_stack_lockup_extension_pox_v3( locked_amount, unlock_height, locked_address: stacker, - contract_identifier: boot_code_id("pox-3", global_context.mainnet), + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), }, )); return Ok(Some(event)); @@ -1084,7 +1084,7 @@ fn handle_stack_lockup_increase_pox_v3( locked_amount: new_balance.amount_locked(), unlock_height: new_balance.unlock_height(), locked_address: stacker, - contract_identifier: boot_code_id("pox-3", global_context.mainnet), + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), }, )); @@ -1268,7 +1268,7 @@ pub fn handle_contract_call_special_cases( && global_context.database.get_pox_3_activation_height() >= global_context.database.get_current_burnchain_block_height() { - return handle_pox_v2_api_contract_call( + return handle_pox_v3_api_contract_call( global_context, sender, contract_id, From 3dbb886aebeb8e99ee695ed0c0e355e4f97bb609 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Wed, 3 May 2023 15:41:15 -0400 Subject: [PATCH 108/158] respond to comments --- clarity/src/vm/database/structures.rs | 2 +- src/clarity_vm/special.rs | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index b81f13781d..8eba16734a 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -599,7 +599,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { .balance .get_total_balance() .checked_sub(amount_to_lock) - .expect("STX underflow"); + .expect("FATAL: account locks more STX than balance possessed"); self.balance = STXBalance::LockedPoxThree { amount_unlocked: new_amount_unlocked, diff --git a/src/clarity_vm/special.rs 
b/src/clarity_vm/special.rs index 70d0c94969..7797073aff 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -1264,10 +1264,13 @@ pub fn handle_contract_call_special_cases( args, result, ); - } else if *contract_id == boot_code_id(POX_3_NAME, global_context.mainnet) - && global_context.database.get_pox_3_activation_height() - >= global_context.database.get_current_burnchain_block_height() - { + } else if *contract_id == boot_code_id(POX_3_NAME, global_context.mainnet) { + if global_context.database.get_current_burnchain_block_height() + < global_context.database.get_pox_3_activation_height() + { + warn!("PoX-3 contract invoked before PoX-3 activation height"); + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); + } return handle_pox_v3_api_contract_call( global_context, sender, From e7a663b4fa84ed65fcef9349108c08d36618bb69 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Thu, 4 May 2023 11:22:33 -0400 Subject: [PATCH 109/158] removed burnchain height check --- src/clarity_vm/special.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index 7797073aff..e738c3b7df 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -1265,12 +1265,6 @@ pub fn handle_contract_call_special_cases( result, ); } else if *contract_id == boot_code_id(POX_3_NAME, global_context.mainnet) { - if global_context.database.get_current_burnchain_block_height() - < global_context.database.get_pox_3_activation_height() - { - warn!("PoX-3 contract invoked before PoX-3 activation height"); - return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); - } return handle_pox_v3_api_contract_call( global_context, sender, From c37c345a3b85b9982357d2f1b2d0d29dcbfa6629 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Apr 2023 16:25:32 -0400 Subject: [PATCH 110/158] fix: add `delegated-to` field to `stacking-state` This is part of SIP-022 to fix the problems 
with `stack-increase`. This field allows us to track whether the stacker is stacking directly or through a pool, and ensure that they cannot stack more than one way at a time. More testing is still needed. --- src/chainstate/stacks/boot/pox-3.clar | 77 ++++++++++++++++----------- 1 file changed, 47 insertions(+), 30 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index 4a4bc1b482..ebd1fc2699 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -25,6 +25,8 @@ (define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) (define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) (define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) +(define-constant ERR_STACKING_IS_DELEGATED 30) +(define-constant ERR_STACKING_NOT_DELEGATED 31) ;; PoX disabling threshold (a percent) (define-constant POX_REJECTION_FRACTION u25) @@ -83,7 +85,7 @@ ;; Records will be deleted from this map when auto-unlocks are processed ;; ;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map -;; and the `pox-2` contract tries to keep this state in sync with the reward-cycle +;; and the `pox-3` contract tries to keep this state in sync with the reward-cycle ;; state. 
The major invariants of this `stacking-state` map are: ;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` ;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` @@ -113,7 +115,9 @@ ;; `first-reward-cycle` (i.e., they do not correspond ;; to entries in the reward set that may have been from ;; previous stack-stx calls, or prior to an extend) - reward-set-indexes: (list 12 uint) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal) } ) @@ -619,7 +623,8 @@ { pox-addr: pox-addr, reward-set-indexes: reward-set-indexes, first-reward-cycle: first-reward-cycle, - lock-period: lock-period }) + lock-period: lock-period, + delegated-to: none }) ;; return the lock-up information, so the node can actually carry out the lock. (ok { stacker: tx-sender, lock-amount: amount-ustx, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) @@ -648,8 +653,9 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) - ;; delegate-stx no longer requires the delegator to not currently - ;; be stacking. + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) ;; pox-addr, if given, must be valid (match pox-addr @@ -870,7 +876,8 @@ { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, reward-set-indexes: (list), - lock-period: lock-period }) + lock-period: lock-period, + delegated-to: tx-sender }) ;; return the lock-up information, so the node can actually carry out the lock. (ok { stacker: stacker, @@ -981,6 +988,9 @@ ;; stacker must have enough stx to lock (asserts! (>= amount-unlocked increase-by) (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; stacker must not be delegating + (asserts! 
(is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) ;; must be called directly by the tx-sender or by an allowed contract-caller (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) @@ -995,7 +1005,7 @@ stacker: tx-sender, add-amount: increase-by }))) (err ERR_STACKING_UNREACHABLE)) - ;; NOTE: stacking-state map is unchanged: it no longer tracks amount-stacked in PoX-2 + ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-3 (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)}))) ;; Extend an active Stacking lock. @@ -1005,7 +1015,8 @@ (define-public (stack-extend (extend-count uint) (pox-addr { version: (buff 1), hashbytes: (buff 32) })) (let ((stacker-info (stx-account tx-sender)) - (stacker-state (get-stacker-info tx-sender)) + ;; to extend, there must already be an etry in the stacking-state + (stacker-state (unwrap! (get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED))) (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) (cur-cycle (current-pox-reward-cycle)) @@ -1014,20 +1025,18 @@ ;; set first-extend-cycle to the next cycle. 
(first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) (+ cur-cycle u1) unlock-in-cycle)) - ;; maintaining valid stacking-state entries requires checking - ;; whether there is an existing entry for the stacker in the state - ;; this would be the case if the stacker is extending a lockup from PoX-1 - ;; to PoX-2 - (first-reward-cycle (match (get first-reward-cycle stacker-state) - ;; if we've stacked in PoX2, then max(cur-cycle, stacker-state.first-reward-cycle) is valid - old-first-cycle (if (> cur-cycle old-first-cycle) cur-cycle old-first-cycle) - ;; otherwise, there aren't PoX2 entries until first-extend-cycle - first-extend-cycle))) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) ;; must be called with positive extend-count (asserts! (>= extend-count u1) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) @@ -1071,7 +1080,8 @@ { pox-addr: pox-addr, reward-set-indexes: reward-set-indexes, first-reward-cycle: first-reward-cycle, - lock-period: lock-period }) + lock-period: lock-period, + delegated-to: none }) ;; return lock-up information (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) @@ -1110,6 +1120,12 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be currently locked (asserts! 
(> existing-lock u0) (err ERR_STACK_INCREASE_NOT_LOCKED)) @@ -1164,7 +1180,8 @@ (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (extend-count uint)) (let ((stacker-info (stx-account stacker)) - (stacker-state (get-stacker-info stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) @@ -1173,16 +1190,9 @@ (cur-cycle (current-pox-reward-cycle)) (first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) (+ cur-cycle u1) unlock-in-cycle)) - ;; update stacker record - ;; maintaining valid stacking-state entries requires checking - ;; whether there is an existing entry for the stacker in the state - ;; this would be the case if the stacker is extending a lockup from PoX-1 - ;; to PoX-2 - (first-reward-cycle (match (get first-reward-cycle stacker-state) - ;; if stacker stacked in PoX2, then max(cur-cycle, stacker-state.first-reward-cycle) is valid - old-first-cycle (if (> cur-cycle old-first-cycle) cur-cycle old-first-cycle) - ;; otherwise, there aren't PoX2 entries until first-extend-cycle - first-extend-cycle))) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) ;; must be called with positive extend-count (asserts! (>= extend-count u1) @@ -1201,6 +1211,12 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; check valid lock period (asserts! 
(check-pox-lock-period lock-period) (err ERR_STACKING_INVALID_LOCK_PERIOD)) @@ -1241,7 +1257,8 @@ { pox-addr: pox-addr, reward-set-indexes: (list), first-reward-cycle: first-reward-cycle, - lock-period: lock-period }) + lock-period: lock-period, + delegated-to: tx-sender }) ;; return the lock-up information, so the node can actually carry out the lock. (ok { stacker: stacker, From 205c380170fb9478a20326f4bb5f6339a8c21866 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 2 May 2023 16:19:19 -0400 Subject: [PATCH 111/158] fix: fix error in `increase-reward-cycle-entry` This fixes the bug discussed in SIP-022, where the total stacked ustx amount was applied to the stacker, instead of just that stacker's amount. --- src/chainstate/stacks/boot/pox-3.clar | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index ebd1fc2699..9f7c289633 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -946,14 +946,15 @@ (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data) }) (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) - (total-ustx (+ (get total-ustx existing-total) (get add-amount data)))) + (add-amount (get add-amount data)) + (total-ustx (+ (get total-ustx existing-total) add-amount))) ;; stacker must match (asserts! 
(is-eq (get stacker existing-entry) (some (get stacker data))) none) ;; update the pox-address list (map-set reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index } { pox-addr: (get pox-addr existing-entry), - total-ustx: total-ustx, + total-ustx: (+ (get total-ustx existing-entry) add-amount), stacker: (some (get stacker data)) }) ;; update the total (map-set reward-cycle-total-stacked From 7c191d8c998671cc94d8f3c84cc5bb617fb8bb6a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 3 May 2023 14:28:15 -0400 Subject: [PATCH 112/158] feat(pox-3): add more assertions --- src/chainstate/stacks/boot/pox-3.clar | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index 9f7c289633..53f7e30b00 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -989,15 +989,15 @@ ;; stacker must have enough stx to lock (asserts! (>= amount-unlocked increase-by) (err ERR_STACKING_INSUFFICIENT_FUNDS)) - ;; stacker must not be delegating - (asserts! (is-none (get delegated-to stacker-state)) - (err ERR_STACKING_IS_DELEGATED)) ;; must be called directly by the tx-sender or by an allowed contract-caller (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) ;; stacker must be directly stacking (asserts! (> (len (get reward-set-indexes stacker-state)) u0) - (err ERR_STACKING_ALREADY_DELEGATED)) + (err ERR_STACKING_IS_DELEGATED)) + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) ;; update reward cycle amounts (asserts! (is-some (fold increase-reward-cycle-entry (get reward-set-indexes stacker-state) @@ -1034,10 +1034,17 @@ (asserts! (>= extend-count u1) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; stacker must be directly stacking + (asserts! 
(> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + ;; stacker must not be delegating (asserts! (is-none (get delegated-to stacker-state)) (err ERR_STACKING_IS_DELEGATED)) + ;; TODO: add more assertions to sanity check the `stacker-info` values with + ;; the `stacker-state` values + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) @@ -1121,6 +1128,10 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + ;; stacker must be delegated to tx-sender (asserts! (is-eq (unwrap! (get delegated-to stacker-state) (err ERR_STACKING_NOT_DELEGATED)) @@ -1212,6 +1223,10 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + ;; stacker must be delegated to tx-sender (asserts! (is-eq (unwrap! (get delegated-to stacker-state) (err ERR_STACKING_NOT_DELEGATED)) From 8cd9273aaaf8843d6da4872b0c6e5ce8bdc4d7b8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 3 May 2023 14:28:54 -0400 Subject: [PATCH 113/158] refactor(pox-3): `first-extend-cycle` Simplify the usage of this variable due to the simplifications available for pox-3. 
--- src/chainstate/stacks/boot/pox-3.clar | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index 53f7e30b00..a5d3a2ade2 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -954,6 +954,7 @@ (map-set reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index } { pox-addr: (get pox-addr existing-entry), + ;; This addresses the bug in pox-2 (see SIP-022) total-ustx: (+ (get total-ustx existing-entry) add-amount), stacker: (some (get stacker data)) }) ;; update the total @@ -974,7 +975,6 @@ (amount-stacked (get locked stacker-info)) (amount-unlocked (get unlocked stacker-info)) (unlock-height (get unlock-height stacker-info)) - (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) (cur-cycle (current-pox-reward-cycle)) (first-increased-cycle (+ cur-cycle u1)) (stacker-state (unwrap! (map-get? stacking-state @@ -1021,11 +1021,7 @@ (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) (cur-cycle (current-pox-reward-cycle)) - (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) - ;; if the account unlocks *during* this cycle (should only occur during testing), - ;; set first-extend-cycle to the next cycle. - (first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) - (+ cur-cycle u1) unlock-in-cycle)) + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) (cur-first-reward-cycle (get first-reward-cycle stacker-state)) (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) @@ -1196,12 +1192,8 @@ (stacker-state (unwrap! 
(get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) - (unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) - ;; if the account unlocks *during* this cycle (should only occur during testing), - ;; set first-extend-cycle to the next cycle. + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) (cur-cycle (current-pox-reward-cycle)) - (first-extend-cycle (if (> (+ cur-cycle u1) unlock-in-cycle) - (+ cur-cycle u1) unlock-in-cycle)) ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) (cur-first-reward-cycle (get first-reward-cycle stacker-state)) (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) From aeeec2503e735494c4bd7c46638c038707287b8d Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 3 May 2023 15:34:05 -0400 Subject: [PATCH 114/158] chore: add comment on `first-extend-cycle` --- src/chainstate/stacks/boot/pox-3.clar | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index a5d3a2ade2..64d9b8c07e 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -1021,6 +1021,7 @@ (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) (cur-cycle (current-pox-reward-cycle)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) (cur-first-reward-cycle (get first-reward-cycle stacker-state)) @@ -1192,6 +1193,7 @@ (stacker-state (unwrap! 
(get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) (amount-ustx (get locked stacker-info)) (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) (cur-cycle (current-pox-reward-cycle)) ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) From c8a3fe0f25e9b8f92071ab604f5b14120e6afe34 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 4 May 2023 10:54:44 -0500 Subject: [PATCH 115/158] use pox-3 for reward sets after activation height --- src/chainstate/coordinator/mod.rs | 19 +++-- src/chainstate/stacks/boot/mod.rs | 115 +++++++++++++++++++++++++++++- 2 files changed, 129 insertions(+), 5 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index 1cb36cffa5..e7f3103a8b 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -268,10 +268,21 @@ impl RewardSetProvider for OnChainRewardSetProvider { let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( &format!("FATAL: no epoch for burn height {}", current_burn_height), ); - if cur_epoch.epoch_id >= StacksEpochId::Epoch22 { - info!("PoX reward cycle defaulting to burn in Epoch 2.2"); - return Ok(RewardSet::empty()); - } + match cur_epoch.epoch_id { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 => { + // Epochs 1.0 - 2.1 compute reward sets + } + StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + info!("PoX reward cycle defaulting to burn in Epochs 2.2 and 2.3"); + return Ok(RewardSet::empty()); + } + StacksEpochId::Epoch24 => { + // Epoch 2.4 computes reward sets + } + }; let registered_addrs = chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs 
index 03d0bd81a6..20eed937eb 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -847,6 +847,95 @@ impl StacksChainState { Ok(ret) } + fn get_reward_addresses_pox_3( + &mut self, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + reward_cycle: u64, + ) -> Result, Error> { + if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_3_NAME)? { + debug!( + "PoX was voted disabled in block {} (reward cycle {})", + block_id, reward_cycle + ); + return Ok(vec![]); + } + + // how many in this cycle? + let num_addrs = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_3_NAME, + &format!("(get-reward-set-size u{})", reward_cycle), + )? + .expect_u128(); + + debug!( + "At block {:?} (reward cycle {}): {} PoX reward addresses", + block_id, reward_cycle, num_addrs + ); + + let mut ret = vec![]; + for i in 0..num_addrs { + // value should be (optional (tuple (pox-addr (tuple (...))) (total-ustx uint))). + let tuple = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_3_NAME, + &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), + )? 
+ .expect_optional() + .expect(&format!( + "FATAL: missing PoX address in slot {} out of {} in reward cycle {}", + i, num_addrs, reward_cycle + )) + .expect_tuple(); + + let pox_addr_tuple = tuple + .get("pox-addr") + .expect(&format!("FATAL: no `pox-addr` in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned(); + + let reward_address = PoxAddress::try_from_pox_tuple(self.mainnet, &pox_addr_tuple) + .expect(&format!( + "FATAL: not a valid PoX address: {:?}", + &pox_addr_tuple + )); + + let total_ustx = tuple + .get("total-ustx") + .expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned() + .expect_u128(); + + let stacker = tuple + .get("stacker") + .expect(&format!( + "FATAL: no 'stacker' in return value from (get-reward-set-pox-address u{} u{})", + reward_cycle, i + )) + .to_owned() + .expect_optional() + .map(|value| value.expect_principal()); + + debug!( + "Parsed PoX reward address"; + "stacked_ustx" => total_ustx, + "reward_address" => %reward_address, + "stacker" => ?stacker, + ); + ret.push(RawRewardSetEntry { + reward_address, + amount_stacked: total_ustx, + stacker, + }) + } + + Ok(ret) + } + /// Get the sequence of reward addresses, as well as the PoX-specified hash mode (which gets /// lost in the conversion to StacksAddress) /// Each address will have at least (get-stacking-minimum) tokens. @@ -870,6 +959,7 @@ impl StacksChainState { match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), + x if x == POX_3_NAME => self.get_reward_addresses_pox_3(sortdb, block_id, reward_cycle), unknown_contract => { panic!("Blockchain implementation failure: PoX contract name '{}' is unknown. 
Chainstate is corrupted.", unknown_contract); @@ -1341,6 +1431,29 @@ pub mod test { addr: PoxAddress, lock_period: u128, burn_ht: u64, + ) -> StacksTransaction { + make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_2_NAME) + } + + pub fn make_pox_3_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + ) -> StacksTransaction { + make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_3_NAME) + } + + pub fn make_pox_2_or_3_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + contract_name: &str, ) -> StacksTransaction { // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -1349,7 +1462,7 @@ pub mod test { let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); let payload = TransactionPayload::new_contract_call( boot_code_test_addr(), - POX_2_NAME, + contract_name, "stack-stx", vec![ Value::UInt(amount), From aa35ed7f6e22d2e52dbb64d5408328ca00b3c786 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 4 May 2023 11:18:06 -0500 Subject: [PATCH 116/158] initial testing --- src/chainstate/coordinator/tests.rs | 8 +- src/chainstate/stacks/boot/mod.rs | 2 + src/chainstate/stacks/boot/pox-3.clar | 23 +- src/chainstate/stacks/boot/pox_2_tests.rs | 54 +- src/chainstate/stacks/boot/pox_3_tests.rs | 602 ++++++++++++++++++++++ 5 files changed, 668 insertions(+), 21 deletions(-) create mode 100644 src/chainstate/stacks/boot/pox_3_tests.rs diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 81f4efa899..4b59d9b347 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -4359,9 +4359,9 @@ fn test_epoch_switch_pox_3_contract_instantiation() { // ^ ^ ^ ^ //.. 
-> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 -> B16 -> B17 -> B18 -> B19 //.. -> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 -> S15 -> S16 -> S17 -> S18 - // \ - // \ - // _ _ _ S19 -> S20 -> .. + // \ + // \ + // _ _ _ S19 -> S20 -> .. let parent = if ix == 0 { BlockHeaderHash([0; 32]) } else if ix == 15 { @@ -4457,7 +4457,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let expected_runtime = match burn_block_height { x if x < 4 => u64::MAX, x if x >= 4 && x < 8 => 205205, - x => 210210 + x => 210210, }; assert_eq!( chainstate diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 20eed937eb..b822259310 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -972,6 +972,8 @@ impl StacksChainState { pub mod contract_tests; #[cfg(test)] pub mod pox_2_tests; +#[cfg(test)] +pub mod pox_3_tests; #[cfg(test)] pub mod test { diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index 64d9b8c07e..d1b2d80651 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -877,7 +877,7 @@ first-reward-cycle: first-reward-cycle, reward-set-indexes: (list), lock-period: lock-period, - delegated-to: tx-sender }) + delegated-to: (some tx-sender) }) ;; return the lock-up information, so the node can actually carry out the lock. (ok { stacker: stacker, @@ -1069,16 +1069,15 @@ ;; register the PoX address with the amount stacked ;; for the new cycles (let ((extended-reward-set-indexes (try! 
(add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender))) - (reward-set-indexes (match stacker-state - ;; if there's active stacker state, we need to extend the existing reward-set-indexes - old-state (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle old-state))) - (old-indexes (get reward-set-indexes old-state)) - ;; build index list by taking the old-indexes starting from cur cycle - ;; and adding the new indexes to it. this way, the index is valid starting from the current cycle - (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) - extended-reward-set-indexes))) - (unwrap-panic (as-max-len? new-list u12))) - extended-reward-set-indexes))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) ;; update stacker record (map-set stacking-state { stacker: tx-sender } @@ -1268,7 +1267,7 @@ reward-set-indexes: (list), first-reward-cycle: first-reward-cycle, lock-period: lock-period, - delegated-to: tx-sender }) + delegated-to: (some tx-sender) }) ;; return the lock-up information, so the node can actually carry out the lock. 
(ok { stacker: stacker, diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 5d7b2ea82d..851fc9f745 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -7,7 +7,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_3_NAME, }; use crate::chainstate::stacks::db::{ MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, @@ -144,6 +144,18 @@ pub fn check_all_stacker_link_invariants( info!("Invoked check all"; "tip" => %tip, "first" => first_cycle_number, "last" => max_cycle_number); for cycle in first_cycle_number..(max_cycle_number + 1) { + // check if it makes sense to test invariants yet. + // For cycles where PoX-3 is active, check if Epoch24 has activated first. 
+ let active_pox_contract = peer + .config + .burnchain + .pox_constants + .active_pox_contract(peer.config.burnchain.reward_cycle_to_block_height(cycle)); + if active_pox_contract == POX_3_NAME && epoch < StacksEpochId::Epoch24 { + info!("Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"); + continue; + } + check_stacker_link_invariants(peer, tip, cycle); } } @@ -402,11 +414,27 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .config .burnchain .reward_cycle_to_block_height(cycle_number); + + let tip_epoch = SortitionDB::get_stacks_epoch(peer.sortdb().conn(), current_burn_height as u64) + .unwrap() + .unwrap(); + + let cycle_start_epoch = SortitionDB::get_stacks_epoch(peer.sortdb().conn(), cycle_start) + .unwrap() + .unwrap(); + + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 + || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 + { + info!("Skipping reward set validation checks on reward cycles that start in Epoch 2.2 or Epoch 2.3"); + return; + } + let reward_set_entries = get_reward_set_entries_index_order_at(peer, tip, cycle_start); let mut checked_total = 0; for (actual_index, entry) in reward_set_entries.iter().enumerate() { debug!( - "Cycle {}: Check {:?} (stacked={}, stacker={})", + "Cycle {}: Check {:?} (stacked={}, stacker={}, tip_epoch={})", cycle_number, &entry.reward_address, entry.amount_stacked, @@ -414,11 +442,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .stacker .as_ref() .map(|s| format!("{}", &s)) - .unwrap_or("(none)".to_string()) + .unwrap_or("(none)".to_string()), + &tip_epoch.epoch_id, ); checked_total += entry.amount_stacked; if let Some(stacker) = &entry.stacker { - if tip_cycle > cycle_start { + if tip_cycle > cycle_number { // if the checked cycle is before the tip's cycle, // the reward-set-entrie's stacker links are no longer necessarily valid // (because the reward cycles for those entries has passed) @@ -426,6 +455,15 
@@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c continue; } + if tip_epoch.epoch_id == StacksEpochId::Epoch22 + || tip_epoch.epoch_id == StacksEpochId::Epoch23 + { + // if the current tip is epoch-2.2 or epoch-2.3, the stacker invariant checks + // no longer make sense: the stacker has unlocked, even though a reward cycle + // is still active (i.e., the last active cycle from epoch-2.1). + continue; + } + let StackingStateCheckData { pox_addr, cycle_indexes, @@ -446,6 +484,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c /// Get the `cycle_number`'s total stacked amount at the given chaintip pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_number: u64) -> u128 { + let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( + peer.config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); + with_clarity_db_ro(peer, tip, |db| { let total_stacked_key = TupleData::from_data(vec![( "reward-cycle".into(), @@ -454,7 +498,7 @@ pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_nu .unwrap() .into(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(active_pox_contract, false), "reward-cycle-total-stacked", &total_stacked_key, ) diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs new file mode 100644 index 0000000000..88b47a0d91 --- /dev/null +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -0,0 +1,602 @@ +use std::collections::{HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::convert::TryInto; + +use crate::address::AddressHashMode; +use crate::burnchains::PoxConstants; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; +use crate::chainstate::stacks::boot::{ + 
BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, +}; +use crate::chainstate::stacks::db::{ + MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, +}; +use crate::chainstate::stacks::index::marf::MarfConnection; +use crate::chainstate::stacks::index::MarfTrieId; +use crate::chainstate::stacks::*; +use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::HeadersDBConn; +use crate::core::*; +use crate::util_lib::db::{DBConn, FromRow}; +use crate::vm::events::StacksTransactionEvent; +use clarity::types::Address; +use clarity::vm::contexts::OwnedEnvironment; +use clarity::vm::contracts::Contract; +use clarity::vm::costs::CostOverflowingMath; +use clarity::vm::database::*; +use clarity::vm::errors::{ + CheckErrors, Error, IncomparableError, InterpreterError, InterpreterResult, RuntimeErrorType, +}; +use clarity::vm::eval; +use clarity::vm::representations::SymbolicExpression; +use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; +use clarity::vm::types::Value::Response; +use clarity::vm::types::{ + BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, + StacksAddressExtensions, StandardPrincipalData, TupleData, TupleTypeSignature, TypeSignature, + Value, NONE, +}; +use stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{Sha256Sum, Sha512Trunc256Sum}; + +use crate::net::test::TestPeer; +use crate::util_lib::boot::boot_code_id; +use crate::{ + burnchains::Burnchain, + chainstate::{ + burn::db::sortdb::SortitionDB, + stacks::{events::TransactionOrigin, tests::make_coinbase}, + }, + clarity_vm::{clarity::ClarityBlockConnection, database::marf::WritableMarfStore}, + net::test::TestEventObserver, +}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, +}; + +use super::{test::*, RawRewardSetEntry}; 
+use crate::clarity_vm::clarity::Error as ClarityError; + +use crate::chainstate::burn::operations::*; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; + +const USTX_PER_HOLDER: u128 = 1_000_000; + +/// Return the BlockSnapshot for the latest sortition in the provided +/// SortitionDB option-reference. Panics on any errors. +fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { + SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() +} + +fn make_test_epochs_pox() -> (Vec, PoxConstants) { + let EMPTY_SORTITIONS = 25; + let EPOCH_2_1_HEIGHT = 11; // 36 + let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 + let EPOCH_2_3_HEIGHT = EPOCH_2_2_HEIGHT + 2; // 52 + let EPOCH_2_4_HEIGHT = EPOCH_2_2_HEIGHT + 6; // 56 + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT, + end_height: EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT, + end_height: EMPTY_SORTITIONS + EPOCH_2_3_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: EMPTY_SORTITIONS + EPOCH_2_3_HEIGHT, + end_height: 
EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + ]; + + let mut pox_constants = PoxConstants::mainnet_default(); + pox_constants.reward_cycle_length = 5; + pox_constants.prepare_length = 2; + pox_constants.anchor_threshold = 1; + pox_constants.v1_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT + 1) as u32; + pox_constants.v2_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT + 1) as u32; + pox_constants.pox_3_activation_height = (EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT + 1) as u32; + + (epochs, pox_constants) +} + +/// In this test case, two Stackers, Alice and Bob stack and interact with the +/// PoX v1 contract and PoX v2 contract across the epoch transition and then +/// again with the PoX v3 contract. +/// +/// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after +/// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". +/// After the early unlock, Alice re-stacks in PoX v2 +/// Alice tries to stack again via PoX v1, which is allowed by the contract, +/// but forbidden by the VM (because PoX has transitioned to v2) +/// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, +/// but is forbidden because he has already placed an account lock via PoX v2. +/// +#[test] +fn simple_pox_lockup_transition_pox_2() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + eprintln!("First v2 cycle = {}", first_v2_cycle); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3_tests::simple_pox_lockup_transition_pox_2", + 7104, + Some(epochs.clone()), + Some(&observer), + ); + + // TODO: this is set to None for now, because I have to think through how this + // check should handle the case when PoX-3 hasn't been published yet! + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + + let EXPECTED_ALICE_FIRST_REWARD_CYCLE = 6; + + let mut coinbase_nonce = 0; + + // these checks are very repetitive + let reward_cycle_checks = |tip_index_block| { + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + let (min_ustx, reward_addrs, total_stacked) = + with_sortdb(&mut peer, |ref mut c, ref sortdb| { + ( + c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), + get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block) + .unwrap(), + c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) + .unwrap(), + ) + }); + + eprintln!( + "\nreward cycle: {}\nmin-uSTX: {}\naddrs: {:?}\ntotal-stacked: {}\n", + cur_reward_cycle, min_ustx, &reward_addrs, total_stacked + ); + + 
if cur_reward_cycle < EXPECTED_ALICE_FIRST_REWARD_CYCLE { + // no reward addresses yet + assert_eq!(reward_addrs.len(), 0); + } else if cur_reward_cycle < EXPECTED_FIRST_V2_CYCLE as u128 { + // After the start of Alice's first cycle, but before the first V2 cycle, + // Alice is the only Stacker, so check that. + let (amount_ustx, pox_addr, lock_period, first_reward_cycle) = + get_stacker_info(&mut peer, &key_to_stacks_addr(&alice).into()).unwrap(); + eprintln!("\nAlice: {} uSTX stacked for {} cycle(s); addr is {:?}; first reward cycle is {}\n", amount_ustx, lock_period, &pox_addr, first_reward_cycle); + + // one reward address, and it's Alice's + // either way, there's a single reward address + assert_eq!(reward_addrs.len(), 1); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[0].1, 1024 * POX_THRESHOLD_STEPS_USTX); + } else { + // v2 reward cycles have begun, so reward addrs should be read from PoX2 which is Bob + Alice + assert_eq!(reward_addrs.len(), 2); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&bob).bytes + ); + assert_eq!(reward_addrs[0].1, 512 * POX_THRESHOLD_STEPS_USTX); + + assert_eq!( + (reward_addrs[1].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[1].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[1].1, 512 * POX_THRESHOLD_STEPS_USTX); + } + }; + + // our "tenure counter" is now at 0 + let tip = get_tip(peer.sortdb.as_ref()); + assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); + + // first tenure is empty + peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + 
let alice_account = get_account(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!( + alice_account.stx_balance.amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + assert_eq!(alice_account.stx_balance.amount_locked(), 0); + assert_eq!(alice_account.stx_balance.unlock_height(), 0); + + // next tenure include Alice's lockup + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_lockup( + &alice, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + 4, + tip.block_height, + ); + + // our "tenure counter" is now at 1 + assert_eq!(tip.block_height, 1 + EMPTY_SORTITIONS as u64); + + let tip_index_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check the stacking minimum + let total_liquid_ustx = get_liquid_ustx(&mut peer); + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!( + min_ustx, + total_liquid_ustx / POX_TESTNET_STACKING_THRESHOLD_25 + ); + + // no reward addresses + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!(reward_addrs.len(), 0); + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let alice_first_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + + assert_eq!(alice_first_reward_cycle, EXPECTED_ALICE_FIRST_REWARD_CYCLE); + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + // produce blocks until immediately before the 2.1 epoch switch + while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height 
{ + peer.tenure_with_txs(&[], &mut coinbase_nonce); + + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + // Have Charlie try to use the PoX2 contract. This transaction + // should be accepted (checked via the tx receipt). Also, importantly, + // the cost tracker should assign costs to Charlie's transaction. + // This is also checked by the transaction receipt. + let tip = get_tip(peer.sortdb.as_ref()); + + let test = make_pox_2_contract_call( + &charlie, + 0, + "delegate-stx", + vec![ + Value::UInt(1_000_000), + PrincipalData::from(key_to_stacks_addr(&charlie)).into(), + Value::none(), + Value::none(), + ], + ); + peer.tenure_with_txs(&[test], &mut coinbase_nonce); + + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + // in the next tenure, PoX 2 should now exist. + // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10, and 11 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); + + assert_eq!( + get_tip(peer.sortdb.as_ref()).block_height as u32, + pox_constants.v1_unlock_height + 1, + "Test should have reached 1 + PoX-v1 unlock height" + ); + + // Auto unlock height is reached, Alice balance should be unlocked + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! 
+ let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_lockup( + &bob, + 1, + 512 * POX_THRESHOLD_STEPS_USTX, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + 4, + tip.block_height, + ); + + let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); + + // At this point, the auto unlock height for v1 accounts has been reached. + // let Alice stack in PoX v2 + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_2_lockup( + &alice, + 1, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 12, + tip.block_height, + ); + peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // Alice locked half her balance in PoX 2 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // now, let's roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // this block is mined in epoch-2.2 + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + // this block should unlock alice's balance + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // now, roll the chain forward to Epoch-2.4 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut 
coinbase_nonce); + // at this point, alice's balance should always be unlocked + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + } + + let tip = get_tip(peer.sortdb.as_ref()).block_height; + let bob_lockup = make_pox_3_lockup( + &bob, + 2, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip, + ); + + let alice_lockup = make_pox_3_lockup( + &alice, + 2, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip, + ); + + peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + eprintln!("Alice addr: {}", alice_address); + eprintln!("Bob addr: {}", bob_address); + + let mut tested_charlie = false; + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == key_to_stacks_addr(&charlie) { + assert!( + r.execution_cost != ExecutionCost::zero(), + "Execution cost is not zero!" 
+ ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); + + tested_charlie = true; + } + } + } + } + + assert!(tested_charlie, "Charlie TX must be tested"); + // Alice should have three accepted transactions: + // TX0 -> Alice's initial lockup in PoX 1 + // TX1 -> Alice's PoX 2 lockup + // TX2 -> Alice's PoX 3 lockup + assert_eq!(alice_txs.len(), 3, "Alice should have 3 confirmed txs"); + // Bob should have two accepted transactions: + // TX0 -> Bob's initial lockup in PoX 2 + // TX1 -> Bob's attempt to lock again in PoX 1 -- this one should fail + // because PoX 1 is now defunct. Checked via the tx receipt. + // TX2 -> Bob's PoX 3 lockup + assert_eq!(bob_txs.len(), 3, "Bob should have 3 confirmed txs"); + // Charlie should have one accepted transactions: + // TX0 -> Charlie's delegation in PoX 2. This tx just checks that the + // initialization code tracks costs in txs that occur after the + // initialization code (which uses a free tracker). + assert_eq!(charlie_txs.len(), 1, "Charlie should have 1 confirmed txs"); + + // TX0 -> Alice's initial lockup in PoX 1 + assert!( + match alice_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx0 should have committed okay" + ); + + // TX1 -> Alice's PoX 2 lockup + assert!( + match alice_txs.get(&1).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx1 should have committed okay" + ); + + // TX2 -> Alice's PoX 3 lockup + assert!( + match alice_txs.get(&1).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx3 should have committed okay" + ); + + // TX0 -> Bob's initial lockup in PoX 2 + assert!( + match bob_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Bob tx0 should have committed okay" + ); + + // TX1 -> Bob's attempt to lock again in PoX 1 -- this one should fail + // because PoX 1 is now defunct. Checked via the tx receipt. 
+ assert_eq!( + bob_txs.get(&1).unwrap().result, + Value::err_none(), + "Bob tx1 should have resulted in a runtime error" + ); + + // TX0 -> Charlie's delegation in PoX 2. This tx just checks that the + // initialization code tracks costs in txs that occur after the + // initialization code (which uses a free tracker). + assert!( + match charlie_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Charlie tx0 should have committed okay" + ); +} From 0c03f854c633981003ed169d65da56955bd43b9e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 4 May 2023 12:22:59 -0500 Subject: [PATCH 117/158] working invariants in the simple test case --- src/chainstate/coordinator/mod.rs | 18 ++++- src/chainstate/stacks/boot/pox_2_tests.rs | 84 ++++++++++++++++++++--- src/chainstate/stacks/boot/pox_3_tests.rs | 82 ++-------------------- 3 files changed, 98 insertions(+), 86 deletions(-) diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index e7f3103a8b..dbce910d47 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -44,6 +44,7 @@ use crate::chainstate::coordinator::comm::{ ArcCounterCoordinatorNotices, CoordinatorEvents, CoordinatorNotices, CoordinatorReceivers, }; use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::POX_3_NAME; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::{ db::{ @@ -246,7 +247,7 @@ impl From for Error { pub trait RewardSetProvider { fn get_reward_set( &self, - current_burn_height: u64, + cycle_start_burn_height: u64, chainstate: &mut StacksChainState, burnchain: &Burnchain, sortdb: &SortitionDB, @@ -259,6 +260,7 @@ pub struct OnChainRewardSetProvider(); impl RewardSetProvider for OnChainRewardSetProvider { fn get_reward_set( &self, + // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` current_burn_height: u64, chainstate: &mut 
StacksChainState, burnchain: &Burnchain, @@ -280,7 +282,19 @@ impl RewardSetProvider for OnChainRewardSetProvider { return Ok(RewardSet::empty()); } StacksEpochId::Epoch24 => { - // Epoch 2.4 computes reward sets + // Epoch 2.4 computes reward sets, but *only* if PoX-3 is active + if burnchain + .pox_constants + .active_pox_contract(current_burn_height) + != POX_3_NAME + { + // Note: this should not happen in mainnet or testnet, because the no reward cycle start height + // exists between Epoch 2.4's instantiation height and the pox-3 activation height. + // However, this *will* happen in testing if Epoch 2.4's instantiation height is set == a reward cycle + // start height + info!("PoX reward cycle defaulting to burn in Epoch 2.4 because cycle start is before PoX-3 activation"); + return Ok(RewardSet::empty()); + } } }; diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 851fc9f745..4820ff2e8a 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -106,18 +106,19 @@ pub fn get_stx_account_at( with_clarity_db_ro(peer, tip, |db| db.get_account_stx_balance(account)) } -/// Get the STXBalance for `account` at the given chaintip -pub fn get_stacking_state_pox_2( +/// get the stacking-state entry for an account at the chaintip +fn get_stacking_state_pox( peer: &mut TestPeer, tip: &StacksBlockId, account: &PrincipalData, + pox_contract: &str, ) -> Option { with_clarity_db_ro(peer, tip, |db| { let lookup_tuple = Value::Tuple( TupleData::from_data(vec![("stacker".into(), account.clone().into())]).unwrap(), ); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(pox_contract, false), "stacking-state", &lookup_tuple, ) @@ -126,6 +127,15 @@ pub fn get_stacking_state_pox_2( }) } +/// Get the pox-2 stacking-state entry for `account` at the given chaintip +pub fn get_stacking_state_pox_2( + peer: &mut TestPeer, + tip: &StacksBlockId, + 
account: &PrincipalData, +) -> Option { + get_stacking_state_pox(peer, tip, account, boot::POX_2_NAME) +} + /// Perform `check_stacker_link_invariants` on cycles [first_cycle_number, max_cycle_number] pub fn check_all_stacker_link_invariants( peer: &mut TestPeer, @@ -152,7 +162,12 @@ pub fn check_all_stacker_link_invariants( .pox_constants .active_pox_contract(peer.config.burnchain.reward_cycle_to_block_height(cycle)); if active_pox_contract == POX_3_NAME && epoch < StacksEpochId::Epoch24 { - info!("Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"); + info!( + "Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"; + "cycle" => cycle, + "epoch" => %epoch, + "active_pox_contract" => %active_pox_contract, + ); continue; } @@ -292,7 +307,21 @@ pub fn check_stacking_state_invariants( .canonical_balance_repr() }); - let stacking_state_entry = get_stacking_state_pox_2(peer, tip, stacker) + let tip_burn_height = StacksChainState::get_stacks_block_header_info_by_index_block_hash( + peer.chainstate().db(), + tip, + ) + .unwrap() + .unwrap() + .burn_header_height; + + let active_pox_contract = peer + .config + .burnchain + .pox_constants + .active_pox_contract(tip_burn_height.into()); + + let stacking_state_entry = get_stacking_state_pox(peer, tip, stacker, active_pox_contract) .expect("Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state") .expect_tuple(); let first_cycle = stacking_state_entry @@ -324,7 +353,9 @@ pub fn check_stacking_state_invariants( assert_eq!( account_state.unlock_height() + 1, stacking_state_unlock_ht, - "Invariant violated: stacking-state and account state have different unlock heights" + "Invariant violated: stacking-state and account state have different unlock heights. 
Tip height = {}, PoX Contract: {}", + tip_burn_height, + active_pox_contract, ); let mut cycle_indexes = HashMap::new(); @@ -349,7 +380,7 @@ pub fn check_stacking_state_invariants( ); let entry_value = with_clarity_db_ro(peer, tip, |db| { db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(active_pox_contract, false), "reward-cycle-pox-address-list", &entry_key ) @@ -426,7 +457,29 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 { - info!("Skipping reward set validation checks on reward cycles that start in Epoch 2.2 or Epoch 2.3"); + info!( + "Skipping reward set validation checks on reward cycles that start in Epoch 2.2 or Epoch 2.3"; + "cycle" => cycle_number, + ); + return; + } + + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch24 + && cycle_start + <= peer + .config + .burnchain + .pox_constants + .pox_3_activation_height + .into() + { + info!( + "Skipping validation of reward set that started in Epoch24, but its cycle starts before pox-3 activation"; + "cycle" => cycle_number, + "cycle_start" => cycle_start, + "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, + "epoch_2_4_start" => cycle_start_epoch.start_height, + ); return; } @@ -464,6 +517,15 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c continue; } + if tip_epoch.epoch_id >= StacksEpochId::Epoch24 + && current_burn_height + <= peer.config.burnchain.pox_constants.pox_3_activation_height + { + // if the tip is epoch-2.4, and pox-3 isn't the active pox contract yet, + // the invariant checks will not make sense for the same reasons as above + continue; + } + let StackingStateCheckData { pox_addr, cycle_indexes, @@ -471,7 +533,11 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c } = 
check_stacking_state_invariants(peer, tip, stacker, true); assert_eq!(&entry.reward_address, &pox_addr, "Invariant violated: reward-cycle entry has a different PoX addr than in stacker-state"); - assert_eq!(cycle_indexes.get(&(cycle_number as u128)).cloned().unwrap(), actual_index as u128, "Invariant violated: stacking-state.reward-set-indexes entry at cycle_number must point to this stacker's entry"); + assert_eq!( + cycle_indexes.get(&(cycle_number as u128)).cloned().unwrap(), + actual_index as u128, + "Invariant violated: stacking-state.reward-set-indexes entry at cycle_number must point to this stacker's entry" + ); } } let expected_total = get_reward_cycle_total(peer, tip, cycle_number); diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 88b47a0d91..4d1ec308bb 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -77,8 +77,12 @@ fn make_test_epochs_pox() -> (Vec, PoxConstants) { let EPOCH_2_1_HEIGHT = 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 let EPOCH_2_3_HEIGHT = EPOCH_2_2_HEIGHT + 2; // 52 + // epoch-2.4 will start at the first block of cycle 11! + // this means that cycle 11 should also be treated like a "burn" let EPOCH_2_4_HEIGHT = EPOCH_2_2_HEIGHT + 6; // 56 + // cycle 11 = 60 + let epochs = vec![ StacksEpoch { epoch_id: StacksEpochId::Epoch10, @@ -188,10 +192,8 @@ fn simple_pox_lockup_transition_pox_2() { Some(&observer), ); - // TODO: this is set to None for now, because I have to think through how this - // check should handle the case when PoX-3 hasn't been published yet! 
peer.config.check_pox_invariants = - Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 20)); let alice = keys.pop().unwrap(); let bob = keys.pop().unwrap(); @@ -201,75 +203,6 @@ fn simple_pox_lockup_transition_pox_2() { let mut coinbase_nonce = 0; - // these checks are very repetitive - let reward_cycle_checks = |tip_index_block| { - let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); - let cur_reward_cycle = burnchain - .block_height_to_reward_cycle(tip_burn_block_height) - .unwrap() as u128; - let (min_ustx, reward_addrs, total_stacked) = - with_sortdb(&mut peer, |ref mut c, ref sortdb| { - ( - c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), - get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block) - .unwrap(), - c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) - .unwrap(), - ) - }); - - eprintln!( - "\nreward cycle: {}\nmin-uSTX: {}\naddrs: {:?}\ntotal-stacked: {}\n", - cur_reward_cycle, min_ustx, &reward_addrs, total_stacked - ); - - if cur_reward_cycle < EXPECTED_ALICE_FIRST_REWARD_CYCLE { - // no reward addresses yet - assert_eq!(reward_addrs.len(), 0); - } else if cur_reward_cycle < EXPECTED_FIRST_V2_CYCLE as u128 { - // After the start of Alice's first cycle, but before the first V2 cycle, - // Alice is the only Stacker, so check that. 
- let (amount_ustx, pox_addr, lock_period, first_reward_cycle) = - get_stacker_info(&mut peer, &key_to_stacks_addr(&alice).into()).unwrap(); - eprintln!("\nAlice: {} uSTX stacked for {} cycle(s); addr is {:?}; first reward cycle is {}\n", amount_ustx, lock_period, &pox_addr, first_reward_cycle); - - // one reward address, and it's Alice's - // either way, there's a single reward address - assert_eq!(reward_addrs.len(), 1); - assert_eq!( - (reward_addrs[0].0).version(), - AddressHashMode::SerializeP2PKH as u8 - ); - assert_eq!( - (reward_addrs[0].0).hash160(), - key_to_stacks_addr(&alice).bytes - ); - assert_eq!(reward_addrs[0].1, 1024 * POX_THRESHOLD_STEPS_USTX); - } else { - // v2 reward cycles have begun, so reward addrs should be read from PoX2 which is Bob + Alice - assert_eq!(reward_addrs.len(), 2); - assert_eq!( - (reward_addrs[0].0).version(), - AddressHashMode::SerializeP2PKH as u8 - ); - assert_eq!( - (reward_addrs[0].0).hash160(), - key_to_stacks_addr(&bob).bytes - ); - assert_eq!(reward_addrs[0].1, 512 * POX_THRESHOLD_STEPS_USTX); - - assert_eq!( - (reward_addrs[1].0).version(), - AddressHashMode::SerializeP2PKH as u8 - ); - assert_eq!( - (reward_addrs[1].0).hash160(), - key_to_stacks_addr(&alice).bytes - ); - assert_eq!(reward_addrs[1].1, 512 * POX_THRESHOLD_STEPS_USTX); - } - }; - // our "tenure counter" is now at 0 let tip = get_tip(peer.sortdb.as_ref()); assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); @@ -500,8 +433,7 @@ fn simple_pox_lockup_transition_pox_2() { let mut bob_txs = HashMap::new(); let mut charlie_txs = HashMap::new(); - eprintln!("Alice addr: {}", alice_address); - eprintln!("Bob addr: {}", bob_address); + debug!("Alice addr: {}, Bob addr: {}", alice_address, bob_address); let mut tested_charlie = false; @@ -509,7 +441,7 @@ fn simple_pox_lockup_transition_pox_2() { for r in b.receipts.into_iter() { if let TransactionOrigin::Stacks(ref t) = r.transaction { let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: 
{}", addr); + debug!("Transaction addr: {}", addr); if addr == alice_address { alice_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == bob_address { From 8f6d96e0185750b21ab624f82dfc75486ec283a5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 4 May 2023 15:58:53 -0500 Subject: [PATCH 118/158] implement pox-3 auto-unlock, add pox-3 auto unlock test --- src/chainstate/stacks/boot/mod.rs | 42 +- src/chainstate/stacks/boot/pox_2_tests.rs | 19 +- src/chainstate/stacks/boot/pox_3_tests.rs | 469 +++++++++++++++++++++- src/chainstate/stacks/db/blocks.rs | 87 ++-- 4 files changed, 569 insertions(+), 48 deletions(-) diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index b822259310..d72003b4d0 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -224,11 +224,12 @@ impl StacksChainState { fn get_user_stacking_state( clarity: &mut ClarityTransactionConnection, principal: &PrincipalData, + pox_contract_name: &str, ) -> TupleData { // query the stacking state for this user before deleting it let is_mainnet = clarity.is_mainnet(); let sender_addr = PrincipalData::from(boot::boot_code_addr(clarity.is_mainnet())); - let pox_contract = boot::boot_code_id(POX_2_NAME, clarity.is_mainnet()); + let pox_contract = boot::boot_code_id(pox_contract_name, clarity.is_mainnet()); let user_stacking_state = clarity .with_readonly_clarity_env( is_mainnet, @@ -326,14 +327,45 @@ impl StacksChainState { /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. - pub fn handle_pox_cycle_start( + /// + /// This should only be called for PoX v2 cycles. 
+ pub fn handle_pox_cycle_start_pox_2( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_2_NAME) + } + + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + /// This should only be called for PoX v3 cycles. + pub fn handle_pox_cycle_start_pox_3( clarity: &mut ClarityTransactionConnection, cycle_number: u64, cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) + } + + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + fn handle_pox_cycle_start( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + pox_contract_name: &str, ) -> Result, Error> { clarity.with_clarity_db(|db| Ok(Self::mark_pox_cycle_handled(db, cycle_number)))?; - debug!("Handling PoX reward cycle start"; "reward_cycle" => cycle_number, "cycle_active" => cycle_info.is_some()); + debug!( + "Handling PoX reward cycle start"; + "reward_cycle" => cycle_number, + "cycle_active" => cycle_info.is_some(), + "pox_contract" => pox_contract_name + ); let cycle_info = match cycle_info { Some(x) => x, @@ -341,7 +373,7 @@ impl StacksChainState { }; let sender_addr = PrincipalData::from(boot::boot_code_addr(clarity.is_mainnet())); - let pox_contract = boot::boot_code_id(POX_2_NAME, clarity.is_mainnet()); + let pox_contract = boot::boot_code_id(pox_contract_name, clarity.is_mainnet()); let mut total_events = vec![]; for (principal, amount_locked) in cycle_info.missed_reward_slots.iter() { @@ -366,7 +398,7 @@ impl StacksChainState { }).expect("FATAL: failed to accelerate PoX unlock"); // query the stacking state for this user before deleting it 
- let user_data = Self::get_user_stacking_state(clarity, principal); + let user_data = Self::get_user_stacking_state(clarity, principal, pox_contract_name); // perform the unlock let (result, _, mut events, _) = clarity diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 4820ff2e8a..c6b8c0e97c 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -107,7 +107,7 @@ pub fn get_stx_account_at( } /// get the stacking-state entry for an account at the chaintip -fn get_stacking_state_pox( +pub fn get_stacking_state_pox( peer: &mut TestPeer, tip: &StacksBlockId, account: &PrincipalData, @@ -175,7 +175,6 @@ pub fn check_all_stacker_link_invariants( } } -#[cfg(test)] pub fn generate_pox_clarity_value(str_hash: &str) -> Value { let byte_vec = hex_bytes(str_hash).unwrap(); let pox_addr_tuple = TupleData::from_data(vec![ @@ -187,19 +186,17 @@ pub fn generate_pox_clarity_value(str_hash: &str) -> Value { Value::Tuple(pox_addr_tuple) } -#[cfg(test)] -struct PoxPrintFields { - op_name: String, - stacker: Value, - balance: Value, - locked: Value, - burnchain_unlock_height: Value, +pub struct PoxPrintFields { + pub op_name: String, + pub stacker: Value, + pub balance: Value, + pub locked: Value, + pub burnchain_unlock_height: Value, } -#[cfg(test)] // This function takes in a StacksTransactionEvent for a print statement from a pox function that modifies // a stacker's state. It verifies that the values in the print statement are as expected. 
-fn check_pox_print_event( +pub fn check_pox_print_event( event: &StacksTransactionEvent, common_data: PoxPrintFields, op_data: HashMap<&str, Value>, diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 4d1ec308bb..18c04fed52 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -7,8 +7,13 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; +use crate::chainstate::stacks::boot::pox_2_tests::{ + check_pox_print_event, generate_pox_clarity_value, get_reward_cycle_total, + get_reward_set_entries_at, get_stacking_state_pox, get_stacking_state_pox_2, + get_stx_account_at, PoxPrintFields, +}; use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_3_NAME, }; use crate::chainstate::stacks::db::{ MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, @@ -153,10 +158,10 @@ fn make_test_epochs_pox() -> (Vec, PoxConstants) { /// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after /// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". /// After the early unlock, Alice re-stacks in PoX v2 -/// Alice tries to stack again via PoX v1, which is allowed by the contract, -/// but forbidden by the VM (because PoX has transitioned to v2) /// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, /// but is forbidden because he has already placed an account lock via PoX v2. +/// +/// After the PoX-3 contract is instantiated, Alice and Bob both stack via PoX v3. 
/// #[test] fn simple_pox_lockup_transition_pox_2() { @@ -532,3 +537,461 @@ fn simple_pox_lockup_transition_pox_2() { "Charlie tx0 should have committed okay" ); } + +#[test] +fn test_simple_pox_2_auto_unlock_ab() { + test_simple_pox_2_auto_unlock(true) +} + +#[test] +fn test_simple_pox_2_auto_unlock_ba() { + test_simple_pox_2_auto_unlock(false) +} + +/// In this test case, two Stackers, Alice and Bob stack and interact with the +/// PoX v1 contract and PoX v2 contract across the epoch transition. +/// +/// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after +/// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". +/// After the early unlock, Alice re-stacks in PoX v2 +/// Alice tries to stack again via PoX v1, which is allowed by the contract, +/// but forbidden by the VM (because PoX has transitioned to v2) +/// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, +/// but is forbidden because he has already placed an account lock via PoX v2. +/// +/// Note: this test is symmetric over the order of alice and bob's stacking calls. +/// when alice goes first, the auto-unlock code doesn't need to perform a "move" +/// when bob goes first, the auto-unlock code does need to perform a "move" +fn test_simple_pox_2_auto_unlock(alice_first: bool) { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + eprintln!("First v2 cycle = {}", first_v2_cycle); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &format!("pox_3_tests::simple_pox_auto_unlock_{}", alice_first), + 6002, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + + let mut coinbase_nonce = 0; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. 
+ // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10, and 11 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_2_lockup( + &alice, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + 1 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let txs = if alice_first { + [alice_lockup, bob_lockup] + } else { + [bob_lockup, alice_lockup] + }; + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice and bob + // for the pox-2 cycles + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE) + 1; + + // but first, check that bob has locked tokens at (height_target + 1) + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + 
assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let first_auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // check that the "raw" reward sets for all cycles just contains entries for alice + // at the cycle start + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // now check that bob has no locked tokens at (height_target + 1) + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + assert_eq!(bob_bal.amount_locked(), 0); + + // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + assert_eq!(bob_bal.amount_locked(), 0); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + assert!( + get_stacking_state_pox_2( + &mut peer, + &latest_block, + 
&key_to_stacks_addr(&bob).to_account_principal() + ) + .is_none(), + "Bob should not have a stacking-state entry" + ); + + let alice_state = get_stacking_state_pox_2( + &mut peer, + &latest_block, + &key_to_stacks_addr(&alice).to_account_principal(), + ) + .expect("Alice should have stacking-state entry") + .expect_tuple(); + let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // now, lets check behavior in Epochs 2.2-2.4, with pox-3 auto unlock tests + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[4].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + // check that alice is unlocked now + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // produce blocks until epoch 2.4 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // repeat the lockups as before, so we can test the pox-3 auto unlock behavior + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_3_lockup( + &alice, + 1, + 1024 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + + let bob_lockup = make_pox_3_lockup( + &bob, + 1, + 1 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let txs = if alice_first { + [alice_lockup, bob_lockup] + } else { + [bob_lockup, alice_lockup] + }; + latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + // check that 
the "raw" reward set will contain entries for alice and bob + // for the pox-3 cycles + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; + let second_auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // but first, check that bob has locked tokens at (height_target + 1) + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles just contains entries for alice + // at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // now check that bob has no locked tokens at (height_target 
+ 1) + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + assert_eq!(bob_bal.amount_locked(), 0); + + // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block + let (bob_bal, _) = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ) + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); + assert_eq!(bob_bal.amount_locked(), 0); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + assert!( + get_stacking_state_pox( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + POX_3_NAME, + ) + .is_none(), + "Bob should not have a stacking-state entry" + ); + + let alice_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &key_to_stacks_addr(&alice).to_account_principal(), + POX_3_NAME, + ) + .expect("Alice should have stacking-state entry") + .expect_tuple(); + let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut coinbase_txs = vec![]; + + for b in blocks.into_iter() { + for (i, r) in 
b.receipts.into_iter().enumerate() { + if i == 0 { + coinbase_txs.push(r); + continue; + } + match r.transaction { + TransactionOrigin::Stacks(ref t) => { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + _ => {} + } + } + } + + assert_eq!(alice_txs.len(), 2); + assert_eq!(bob_txs.len(), 2); + + // TX0 -> Bob's initial lockup in PoX 2 + assert!( + match bob_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Bob tx0 should have committed okay" + ); + + assert_eq!(coinbase_txs.len(), 37); + + info!( + "Expected first auto-unlock coinbase index: {}", + first_auto_unlock_coinbase + ); + + // Check that the event produced by "handle-unlock" has a well-formed print event + // and that this event is included as part of the coinbase tx + for unlock_coinbase_index in [first_auto_unlock_coinbase, second_auto_unlock_coinbase] { + // expect the unlock to occur 1 block after the handle-unlock method was invoked. 
+ let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; + let expected_cycle = pox_constants + .block_height_to_reward_cycle(0, expected_unlock_height) + .unwrap(); + + let auto_unlock_tx = coinbase_txs[unlock_coinbase_index as usize].events[0].clone(); + let pox_addr_val = generate_pox_clarity_value("60c59ab11f7063ef44c16d3dc856f76bbb915eba"); + let auto_unlock_op_data = HashMap::from([ + ("first-cycle-locked", Value::UInt(expected_cycle.into())), + ("first-unlocked-cycle", Value::UInt(expected_cycle.into())), + ("pox-addr", pox_addr_val), + ]); + let common_data = PoxPrintFields { + op_name: "handle-unlock".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(10230000000000), + locked: Value::UInt(10000000000), + burnchain_unlock_height: Value::UInt(expected_unlock_height.into()), + }; + check_pox_print_event(&auto_unlock_tx, common_data, auto_unlock_op_data); + } +} diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 40da3decc4..2bc21b30e8 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -5605,38 +5605,67 @@ impl StacksChainState { // Do not try to handle auto-unlocks on pox_reward_cycle 0 // This cannot even occur in the mainchain, because 2.1 starts much // after the 1st reward cycle, however, this could come up in mocknets or regtest. 
- if pox_reward_cycle > 1 { - // do not try to handle auto-unlocks before the reward set has been calculated (at block = 0 of cycle) - // or written to the sortition db (at block = 1 of cycle) - if Burnchain::is_before_reward_cycle( - burn_dbconn.get_burn_start_height().into(), - burn_tip_height, - burn_dbconn.get_pox_reward_cycle_length().into(), - ) { - debug!("check_and_handle_reward_start: before reward cycle"); - return Ok(vec![]); - } - let handled = clarity_tx.with_clarity_db_readonly(|clarity_db| { - Self::handled_pox_cycle_start(clarity_db, pox_reward_cycle) - }); - debug!("check_and_handle_reward_start: handled = {}", handled); + if pox_reward_cycle <= 1 { + return Ok(vec![]); + } - if !handled { - let pox_start_cycle_info = sortition_dbconn.get_pox_start_cycle_info( - parent_sortition_id, - chain_tip.burn_header_height.into(), - pox_reward_cycle, - )?; - debug!("check_and_handle_reward_start: got pox reward cycle info"); - let events = clarity_tx.block.as_free_transaction(|clarity_tx| { - Self::handle_pox_cycle_start(clarity_tx, pox_reward_cycle, pox_start_cycle_info) - })?; - debug!("check_and_handle_reward_start: handled pox cycle start"); - return Ok(events); - } + // do not try to handle auto-unlocks before the reward set has been calculated (at block = 0 of cycle) + // or written to the sortition db (at block = 1 of cycle) + if Burnchain::is_before_reward_cycle( + burn_dbconn.get_burn_start_height().into(), + burn_tip_height, + burn_dbconn.get_pox_reward_cycle_length().into(), + ) { + debug!("check_and_handle_reward_start: before reward cycle"); + return Ok(vec![]); } + let handled = clarity_tx.with_clarity_db_readonly(|clarity_db| { + Self::handled_pox_cycle_start(clarity_db, pox_reward_cycle) + }); + debug!("check_and_handle_reward_start: handled = {}", handled); - Ok(vec![]) + if handled { + // already handled this cycle, don't need to do anything + return Ok(vec![]); + } + + let active_epoch = clarity_tx.get_epoch(); + + let pox_start_cycle_info 
= sortition_dbconn.get_pox_start_cycle_info( + parent_sortition_id, + chain_tip.burn_header_height.into(), + pox_reward_cycle, + )?; + debug!("check_and_handle_reward_start: got pox reward cycle info"); + let events = clarity_tx.block.as_free_transaction(|clarity_tx| { + match active_epoch { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 => { + // prior to epoch-2.4, the semantics of this method were such that any epoch + // would invoke the `handle_pox_cycle_start_pox_2()` method. + // however, only epoch-2.1 ever actually *does* invoke this method, + // so, with some careful testing, this branch could perhaps be simplified + // such that only Epoch21 matches, and all the other ones _panic_. + // For now, I think it's better to preserve the exact prior semantics. + Self::handle_pox_cycle_start_pox_2( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ) + } + StacksEpochId::Epoch24 => Self::handle_pox_cycle_start_pox_3( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ), + } + })?; + debug!("check_and_handle_reward_start: handled pox cycle start"); + return Ok(events); } /// Called in both follower and miner block assembly paths. 
From 9672c33f1b9bec4e010175cd5a4362d050b9a7e2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 4 May 2023 17:34:38 -0500 Subject: [PATCH 119/158] fix pox-2 tests for the updated invariant checks --- src/chainstate/stacks/boot/pox_2_tests.rs | 81 ++++++++++++++++------- 1 file changed, 57 insertions(+), 24 deletions(-) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index c6b8c0e97c..4662721ae4 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -7,7 +7,8 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_3_NAME, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, + POX_3_NAME, }; use crate::chainstate::stacks::db::{ MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, @@ -298,6 +299,7 @@ pub fn check_stacking_state_invariants( tip: &StacksBlockId, stacker: &PrincipalData, expect_indexes: bool, + active_pox_contract: &str, ) -> StackingStateCheckData { let account_state = with_clarity_db_ro(peer, tip, |db| { db.get_stx_balance_snapshot(stacker) @@ -312,14 +314,11 @@ pub fn check_stacking_state_invariants( .unwrap() .burn_header_height; - let active_pox_contract = peer - .config - .burnchain - .pox_constants - .active_pox_contract(tip_burn_height.into()); - let stacking_state_entry = get_stacking_state_pox(peer, tip, stacker, active_pox_contract) - .expect("Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state") + .expect(&format!( + "Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state (pox_contract = {})", + active_pox_contract, + )) .expect_tuple(); let 
first_cycle = stacking_state_entry .get("first-reward-cycle") @@ -451,6 +450,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .unwrap() .unwrap(); + let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( + peer.config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 { @@ -461,15 +466,7 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c return; } - if cycle_start_epoch.epoch_id == StacksEpochId::Epoch24 - && cycle_start - <= peer - .config - .burnchain - .pox_constants - .pox_3_activation_height - .into() - { + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch24 && active_pox_contract != POX_3_NAME { info!( "Skipping validation of reward set that started in Epoch24, but its cycle starts before pox-3 activation"; "cycle" => cycle_number, @@ -527,7 +524,7 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c pox_addr, cycle_indexes, .. - } = check_stacking_state_invariants(peer, tip, stacker, true); + } = check_stacking_state_invariants(peer, tip, stacker, true, active_pox_contract); assert_eq!(&entry.reward_address, &pox_addr, "Invariant violated: reward-cycle entry has a different PoX addr than in stacker-state"); assert_eq!( @@ -3000,12 +2997,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. 
- } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, @@ -3053,12 +3062,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, @@ -3109,12 +3130,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. 
- } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, From cf53475aa242448cdad150be9f6c12a36856646d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 09:46:32 -0500 Subject: [PATCH 120/158] fix bug in pox-3-increase handler, add test for delegate-stack-increase --- clarity/src/vm/database/structures.rs | 2 +- src/chainstate/stacks/boot/mod.rs | 17 + src/chainstate/stacks/boot/pox_2_tests.rs | 18 +- src/chainstate/stacks/boot/pox_3_tests.rs | 673 ++++++++++++++++++++-- 4 files changed, 667 insertions(+), 43 deletions(-) diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 8eba16734a..9216a49681 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -666,7 +666,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { .checked_sub(new_total_locked) .expect("STX underflow: more is locked than total balance"); - self.balance = STXBalance::LockedPoxTwo { + self.balance = STXBalance::LockedPoxThree { amount_unlocked, amount_locked: new_total_locked, unlock_height: self.balance.unlock_height(), diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index d72003b4d0..3e6c04711b 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -1597,6 +1597,23 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_3_contract_call( + key: &StacksPrivateKey, + nonce: u64, + function_name: &str, + args: Vec, + ) -> StacksTransaction { + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + POX_3_NAME, + function_name, + args, + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } + // make a stream of invalid pox-lockup transactions fn make_invalid_pox_lockups(key: 
&StacksPrivateKey, mut nonce: u64) -> Vec { let mut ret = vec![]; diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 4662721ae4..882e018c66 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -98,13 +98,16 @@ pub fn get_reward_set_entries_index_order_at( }) } -/// Get the STXBalance for `account` at the given chaintip +/// Get the canonicalized STXBalance for `account` at the given chaintip pub fn get_stx_account_at( peer: &mut TestPeer, tip: &StacksBlockId, account: &PrincipalData, ) -> STXBalance { - with_clarity_db_ro(peer, tip, |db| db.get_account_stx_balance(account)) + with_clarity_db_ro(peer, tip, |db| { + db.get_stx_balance_snapshot(account) + .canonical_balance_repr() + }) } /// get the stacking-state entry for an account at the chaintip @@ -585,6 +588,7 @@ pub fn get_partial_stacked( pox_addr: &Value, cycle_number: u64, sender: &PrincipalData, + pox_contract: &str, ) -> u128 { with_clarity_db_ro(peer, tip, |db| { let key = TupleData::from_data(vec![ @@ -595,7 +599,7 @@ pub fn get_partial_stacked( .unwrap() .into(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(pox_contract, false), "partial-stacked-by-cycle", &key, ) @@ -1501,6 +1505,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -1525,6 +1530,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -1606,6 +1612,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_first_lock_amount); } @@ -1617,6 +1624,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_delegation_amount,); } 
@@ -4530,6 +4538,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -4556,6 +4565,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -4629,6 +4639,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cur_reward_cycle + 1, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 1); @@ -4640,6 +4651,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_delegation_amount); } diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 18c04fed52..6bc6f2cace 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -8,12 +8,13 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::pox_2_tests::{ - check_pox_print_event, generate_pox_clarity_value, get_reward_cycle_total, + check_pox_print_event, generate_pox_clarity_value, get_partial_stacked, get_reward_cycle_total, get_reward_set_entries_at, get_stacking_state_pox, get_stacking_state_pox_2, get_stx_account_at, PoxPrintFields, }; use crate::chainstate::stacks::boot::{ - BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_3_NAME, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, + POX_3_NAME, }; use crate::chainstate::stacks::db::{ MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, @@ -595,7 +596,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, 
&format!("pox_3_tests::simple_pox_auto_unlock_{}", alice_first), - 6002, + 7102, Some(epochs.clone()), Some(&observer), ); @@ -671,15 +672,10 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE) + 1; // but first, check that bob has locked tokens at (height_target + 1) - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); @@ -702,28 +698,18 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { } // now check that bob has no locked tokens at (height_target + 1) - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); @@ -831,15 +817,10 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { let second_auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; // but first, check that bob has locked tokens at (height_target + 1) - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, 
&key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); @@ -860,28 +841,18 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { } // now check that bob has no locked tokens at (height_target + 1) - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block - let (bob_bal, _) = get_stx_account_at( + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), - ) - .canonical_repr_at_block( - height_target + 1, - burnchain.pox_constants.v1_unlock_height, - burnchain.pox_constants.v2_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); @@ -995,3 +966,627 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { check_pox_print_event(&auto_unlock_tx, common_data, auto_unlock_op_data); } } + +/// In this test case, Alice delegates to Bob. +/// Bob stacks Alice's funds via PoX v2 for 6 cycles. In the third cycle, +/// Bob increases Alice's stacking amount. +/// +#[test] +fn delegate_stack_increase() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &format!("pox_3_delegate_stack_increase"), + 7103, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let num_blocks = 35; + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob = keys.pop().unwrap(); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + let bob_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, bob_address.bytes.clone()); + let mut alice_nonce = 0; + let mut bob_nonce = 0; + + let alice_delegation_amount = 1023 * POX_THRESHOLD_STEPS_USTX; + let alice_first_lock_amount = 512 * POX_THRESHOLD_STEPS_USTX; + + let mut coinbase_nonce = 0; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. 
+ let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx + let alice_delegation_1 = make_pox_2_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + + let alice_delegation_pox_2_nonce = alice_nonce; + alice_nonce += 1; + + let delegate_stack_tx = make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + + bob_nonce += 1; + + let mut latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx], + &mut coinbase_nonce, + ); + + let expected_pox_2_unlock_ht = + burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + + // check that the partial stacking state contains entries for bob + for cycle_number in EXPECTED_FIRST_V2_CYCLE..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 1st reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + 
+ // check that the partial stacking state contains entries for bob + for cycle_number in EXPECTED_FIRST_V2_CYCLE..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let mut txs_to_submit = vec![]; + + let fail_direct_increase_delegation = alice_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(1)], + )); + alice_nonce += 1; + + let fail_delegate_too_much_locked = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount + 1), + ], + )); + bob_nonce += 1; + + let fail_invalid_amount = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(0), + ], + )); + bob_nonce += 1; + + let fail_insufficient_funds = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_bal.amount_unlocked() + 1), + ], + )); + bob_nonce += 1; + + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount), + ], + )); + let bob_delegate_increase_pox_2_nonce = bob_nonce; + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_delegation_amount); + 
assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + + // check that the partial stacking state contains entries for bob and they've incremented correctly + for cycle_number in (EXPECTED_FIRST_V2_CYCLE)..(EXPECTED_FIRST_V2_CYCLE + 2) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, alice_first_lock_amount); + } + + for cycle_number in (EXPECTED_FIRST_V2_CYCLE + 2)..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount,); + } + + // okay, now let's progress through epochs 2.2-2.4, and perform the delegation tests + // on pox-3 + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_locked(), + 0, + ); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_locked(), + 0, + ); + // this block should unlock alice's balance + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + 0, + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, 
&bob_principal).amount_locked(), + 0, + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + + // Roll to Epoch-2.4 and re-do the above tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx + let alice_delegation_1 = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + let alice_delegation_pox_3_nonce = alice_nonce; + alice_nonce += 1; + + let delegate_stack_tx = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + + bob_nonce += 1; + + latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx], + &mut coinbase_nonce, + ); + + let expected_pox_3_unlock_ht = burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 3rd reward cycle 
gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + let bob_bal = get_stx_account_at(&mut peer, &latest_block, &bob_principal); + assert_eq!(bob_bal.amount_locked(), 0); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let mut txs_to_submit = vec![]; + + let pox_3_fail_direct_increase_delegation = alice_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(1)], + )); + alice_nonce += 1; + + let pox_3_fail_delegate_too_much_locked = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount + 1), + ], + )); + bob_nonce += 1; + + let pox_3_fail_invalid_amount = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(0), + ], + )); + bob_nonce += 1; + + let pox_3_fail_insufficient_funds = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + 
Value::UInt(alice_bal.amount_unlocked() + 1), + ], + )); + bob_nonce += 1; + + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount), + ], + )); + let bob_delegate_increase_pox_3_nonce = bob_nonce; + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).unlock_height(), + expected_pox_3_unlock_ht, + ); + + // check that the partial stacking state contains entries for bob and they've incremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!( + partial_stacked, + alice_first_lock_amount, + "Unexpected partially stacked amount in cycle: {} = {} + {}", + cycle_number, + first_v3_cycle, + first_v3_cycle - cycle_number, + ); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount); + } + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == 
bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len() as u64, 4); + assert_eq!(bob_txs.len() as u64, 10); + + // transaction should fail because Alice cannot increase her own stacking amount while delegating + assert_eq!( + &alice_txs[&fail_direct_increase_delegation] + .result + .to_string(), + "(err 20)" + ); + + // transaction should fail because Alice did not delegate enough funds to Bob + assert_eq!( + &bob_txs[&fail_delegate_too_much_locked].result.to_string(), + "(err 22)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &bob_txs[&fail_insufficient_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &bob_txs[&fail_invalid_amount].result.to_string(), + "(err 18)" + ); + + assert_eq!( + &alice_txs[&pox_3_fail_direct_increase_delegation] + .result + .to_string(), + "(err 30)" + ); + + // transaction should fail because Alice did not delegate enough funds to Bob + assert_eq!( + &bob_txs[&pox_3_fail_delegate_too_much_locked] + .result + .to_string(), + "(err 22)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &bob_txs[&pox_3_fail_insufficient_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &bob_txs[&pox_3_fail_invalid_amount].result.to_string(), + "(err 18)" + ); + + for delegation_nonce in [alice_delegation_pox_2_nonce, alice_delegation_pox_3_nonce] { + let delegate_stx_tx = &alice_txs.get(&delegation_nonce).unwrap().clone().events[0]; + let delegate_stx_op_data = HashMap::from([ + ("pox-addr", Value::none()), + ("amount-ustx", Value::UInt(10230000000000)), + ("unlock-burn-height", Value::none()), + ( + "delegate-to", + Value::Principal( + StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + 
.to_account_principal(), + ), + ), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stx".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(10240000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(delegate_stx_tx, common_data, delegate_stx_op_data); + } + + // Check that the call to `delegate-stack-increase` has a well-formed print event. + for (unlock_height, del_increase_nonce) in [ + (expected_pox_2_unlock_ht, bob_delegate_increase_pox_2_nonce), + (expected_pox_3_unlock_ht, bob_delegate_increase_pox_3_nonce), + ] { + let delegate_stack_increase_tx = + &bob_txs.get(&del_increase_nonce).unwrap().clone().events[0]; + let pox_addr_val = generate_pox_clarity_value("60c59ab11f7063ef44c16d3dc856f76bbb915eba"); + let delegate_op_data = HashMap::from([ + ("pox-addr", pox_addr_val), + ("increase-by", Value::UInt(5110000000000)), + ("total-locked", Value::UInt(10230000000000)), + ( + "delegator", + Value::Principal( + StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + .to_account_principal(), + ), + ), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-increase".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(5120000000000), + locked: Value::UInt(5120000000000), + burnchain_unlock_height: Value::UInt(unlock_height.into()), + }; + check_pox_print_event(delegate_stack_increase_tx, common_data, delegate_op_data); + } +} From 1dbbc4ecc8cf03f73a38442e86395e8e1fe1c2ae Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 10:27:19 -0500 Subject: [PATCH 121/158] test: add pox_3_tests::stack_increase --- src/chainstate/stacks/boot/pox_3_tests.rs | 428 ++++++++++++++++++++++ 1 
file changed, 428 insertions(+) diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 6bc6f2cace..bf5a142c7c 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -1590,3 +1590,431 @@ fn delegate_stack_increase() { check_pox_print_event(delegate_stack_increase_tx, common_data, delegate_op_data); } } + +#[test] +fn stack_increase() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &format!("pox_3_stack_increase"), + 7105, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let num_blocks = 35; + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let mut alice_nonce = 0; + + let mut coinbase_nonce = 0; + + let first_lockup_amt = 512 * POX_THRESHOLD_STEPS_USTX; + let total_balance = 1024 * POX_THRESHOLD_STEPS_USTX; + let increase_amt = total_balance - first_lockup_amt; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= 
epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. + let tip = get_tip(peer.sortdb.as_ref()); + + // submit an increase: this should fail, because Alice is not yet locked + let fail_no_lock_tx = alice_nonce; + let alice_increase = make_pox_2_increase(&alice, alice_nonce, increase_amt); + alice_nonce += 1; + + let alice_lockup = make_pox_2_lockup( + &alice, + alice_nonce, + first_lockup_amt, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + alice_nonce += 1; + + let mut latest_block = + peer.tenure_with_txs(&[alice_increase, alice_lockup], &mut coinbase_nonce); + + let expected_pox_2_unlock_ht = + burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + // we'll produce blocks until the 1st reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = 
peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles contains entries for alice + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + let mut txs_to_submit = vec![]; + let fail_bad_amount = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, 0)); + alice_nonce += 1; + + // this stack-increase tx should work + let pox_2_success_increase = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, increase_amt)); + alice_nonce += 1; + + // increase by an amount we don't have! + let fail_not_enough_funds = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, 1)); + alice_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the total reward cycle amounts have incremented correctly + for cycle_number in first_v2_cycle..(first_v2_cycle + 2) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + 
key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + assert!( + first_v2_cycle + 2 < first_v3_cycle, + "Make sure that we can actually test a stack-increase in pox-2 before pox-3 activates" + ); + + for cycle_number in (first_v2_cycle + 2)..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt + increase_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_lockup_amt + increase_amt, + ); + } + + // Roll to Epoch-2.4 and re-do the above tests + // okay, now let's progress through epochs 2.2-2.4, and perform the delegation tests + // on pox-3 + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + first_lockup_amt + increase_amt, + ); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + first_lockup_amt + increase_amt, + ); + + // this block should unlock alice's balance + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + 0, + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, 
&alice_principal).amount_unlocked(), + total_balance, + ); + + // Roll to Epoch-2.4 and re-do the above stack-increase tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. + let tip = get_tip(peer.sortdb.as_ref()); + + // submit an increase: this should fail, because Alice is not yet locked + let pox_3_fail_no_lock_tx = alice_nonce; + let alice_increase = make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(increase_amt)], + ); + alice_nonce += 1; + + let alice_lockup = make_pox_3_lockup( + &alice, + alice_nonce, + first_lockup_amt, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + alice_nonce += 1; + + let mut latest_block = + peer.tenure_with_txs(&[alice_increase, alice_lockup], &mut coinbase_nonce); + + let expected_pox_3_unlock_ht = burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + // we'll produce blocks until the 3rd reward cycle gets through the "handled start" code + // 
this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + let mut txs_to_submit = vec![]; + let pox_3_fail_bad_amount = alice_nonce; + let bad_amount_tx = + make_pox_3_contract_call(&alice, alice_nonce, "stack-increase", vec![Value::UInt(0)]); + txs_to_submit.push(bad_amount_tx); + alice_nonce += 1; + + // this stack-increase tx should work + let pox_3_success_increase = alice_nonce; + let good_amount_tx = make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(increase_amt)], + ); + txs_to_submit.push(good_amount_tx); + alice_nonce += 1; + + // increase by an amount we don't have! 
+ let pox_3_fail_not_enough_funds = alice_nonce; + let not_enough_tx = + make_pox_3_contract_call(&alice, alice_nonce, "stack-increase", vec![Value::UInt(1)]); + txs_to_submit.push(not_enough_tx); + alice_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the total reward cycle amounts have incremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt + increase_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_lockup_amt + increase_amt, + ); + } + + // now let's check some tx receipts + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in 
b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len() as u64, alice_nonce); + + // transaction should fail because lock isn't applied + assert_eq!(&alice_txs[&fail_no_lock_tx].result.to_string(), "(err 27)"); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &alice_txs[&fail_not_enough_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!(&alice_txs[&fail_bad_amount].result.to_string(), "(err 18)"); + + // transaction should fail because lock isn't applied + assert_eq!( + &alice_txs[&pox_3_fail_no_lock_tx].result.to_string(), + "(err 27)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &alice_txs[&pox_3_fail_not_enough_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &alice_txs[&pox_3_fail_bad_amount].result.to_string(), + "(err 18)" + ); + + // Check that the call to `stack-increase` has a well-formed print event. 
+ for (increase_nonce, unlock_height) in [ + (pox_2_success_increase, expected_pox_2_unlock_ht), + (pox_3_success_increase, expected_pox_3_unlock_ht), + ] { + let stack_increase_tx = &alice_txs.get(&increase_nonce).unwrap().clone().events[0]; + let pox_addr_val = generate_pox_clarity_value("ae1593226f85e49a7eaff5b633ff687695438cc9"); + let stack_op_data = HashMap::from([ + ("increase-by", Value::UInt(5120000000000)), + ("total-locked", Value::UInt(10240000000000)), + ("pox-addr", pox_addr_val), + ]); + let common_data = PoxPrintFields { + op_name: "stack-increase".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(5120000000000), + locked: Value::UInt(5120000000000), + burnchain_unlock_height: Value::UInt(unlock_height.into()), + }; + check_pox_print_event(stack_increase_tx, common_data, stack_op_data); + } +} From e29e7187def26390384878b1bed4af472ee7a09e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 12:22:32 -0500 Subject: [PATCH 122/158] test: more porting of pox-2 tests to pox-3 --- src/chainstate/stacks/boot/mod.rs | 18 + src/chainstate/stacks/boot/pox_2_tests.rs | 8 +- src/chainstate/stacks/boot/pox_3_tests.rs | 1015 ++++++++++++++++++++- 3 files changed, 1034 insertions(+), 7 deletions(-) diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 3e6c04711b..4219c7f093 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -1544,6 +1544,24 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_3_extend( + key: &StacksPrivateKey, + nonce: u64, + addr: PoxAddress, + lock_period: u128, + ) -> StacksTransaction { + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + POX_3_NAME, + "stack-extend", + vec![Value::UInt(lock_period), 
addr_tuple], + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } + fn make_tx( key: &StacksPrivateKey, nonce: u64, diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index 882e018c66..e76cf5fe5c 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -288,11 +288,11 @@ pub fn check_pox_print_event( } pub struct StackingStateCheckData { - pox_addr: PoxAddress, + pub pox_addr: PoxAddress, /// this is a map from reward cycle number to the value in reward-set-indexes - cycle_indexes: HashMap, - first_cycle: u128, - lock_period: u128, + pub cycle_indexes: HashMap, + pub first_cycle: u128, + pub lock_period: u128, } /// Check the stacking-state invariants of `stacker` diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index bf5a142c7c..43580dcdb1 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -8,9 +8,9 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::pox_2_tests::{ - check_pox_print_event, generate_pox_clarity_value, get_partial_stacked, get_reward_cycle_total, - get_reward_set_entries_at, get_stacking_state_pox, get_stacking_state_pox_2, - get_stx_account_at, PoxPrintFields, + check_pox_print_event, check_stacking_state_invariants, generate_pox_clarity_value, + get_partial_stacked, get_reward_cycle_total, get_reward_set_entries_at, get_stacking_state_pox, + get_stacking_state_pox_2, get_stx_account_at, PoxPrintFields, StackingStateCheckData, }; use crate::chainstate::stacks::boot::{ BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, @@ -2018,3 +2018,1012 @@ fn stack_increase() { check_pox_print_event(stack_increase_tx, common_data, stack_op_data); } } + 
+#[test] +fn pox_extend_transition() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &format!("pox_3_pox_extend_transition"), + 7110, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + + let EXPECTED_ALICE_FIRST_REWARD_CYCLE = 6; + let mut coinbase_nonce = 0; + + let INITIAL_BALANCE = 1024 * POX_THRESHOLD_STEPS_USTX; + let ALICE_LOCKUP = 1024 * POX_THRESHOLD_STEPS_USTX; + let BOB_LOCKUP = 512 * POX_THRESHOLD_STEPS_USTX; + + // these checks should pass between Alice's first reward cycle, + // and the start of V2 reward cycles + let alice_rewards_to_v2_start_checks = |tip_index_block, peer: &mut TestPeer| { + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let cur_reward_cycle = burnchain + 
.block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + ( + c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), + get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), + c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) + .unwrap(), + ) + }); + + assert!( + cur_reward_cycle >= EXPECTED_ALICE_FIRST_REWARD_CYCLE + && cur_reward_cycle < first_v2_cycle as u128 + ); + // Alice is the only Stacker, so check that. + let (amount_ustx, pox_addr, lock_period, first_reward_cycle) = + get_stacker_info(peer, &key_to_stacks_addr(&alice).into()).unwrap(); + eprintln!( + "\nAlice: {} uSTX stacked for {} cycle(s); addr is {:?}; first reward cycle is {}\n", + amount_ustx, lock_period, &pox_addr, first_reward_cycle + ); + + // one reward address, and it's Alice's + // either way, there's a single reward address + assert_eq!(reward_addrs.len(), 1); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[0].1, ALICE_LOCKUP); + }; + + // these checks should pass after the start of V2 reward cycles + let v2_rewards_checks = |tip_index_block, peer: &mut TestPeer| { + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + ( + c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), + get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), + c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) + .unwrap(), + ) + }); + + eprintln!( + "reward_cycle = {}, reward_addrs = {}, total_stacked 
= {}", + cur_reward_cycle, + reward_addrs.len(), + total_stacked + ); + + assert!(cur_reward_cycle >= first_v2_cycle as u128); + // v2 reward cycles have begun, so reward addrs should be read from PoX2 which is Bob + Alice + assert_eq!(reward_addrs.len(), 2); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&bob).bytes + ); + assert_eq!(reward_addrs[0].1, BOB_LOCKUP); + + assert_eq!( + (reward_addrs[1].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[1].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[1].1, ALICE_LOCKUP); + }; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let alice_account = get_account(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_account.stx_balance.amount_unlocked(), INITIAL_BALANCE); + assert_eq!(alice_account.stx_balance.amount_locked(), 0); + assert_eq!(alice_account.stx_balance.unlock_height(), 0); + + // next tenure include Alice's lockup + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_lockup( + &alice, + 0, + ALICE_LOCKUP, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + 4, + tip.block_height, + ); + + let tip_index_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check the stacking minimum + let total_liquid_ustx = get_liquid_ustx(&mut peer); + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!( + min_ustx, + total_liquid_ustx / POX_TESTNET_STACKING_THRESHOLD_25 + ); + + // no reward addresses + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) + }) + .unwrap(); + 
assert_eq!(reward_addrs.len(), 0); + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let alice_first_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); + + assert_eq!( + alice_first_reward_cycle as u128, + EXPECTED_ALICE_FIRST_REWARD_CYCLE + ); + let height_target = burnchain.reward_cycle_to_block_height(alice_first_reward_cycle) + 1; + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + } + + // in the next tenure, PoX 2 should now exist. 
+ // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + BOB_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 3, + tip.block_height, + ); + + // Alice _will_ auto-unlock: she can stack-extend in PoX v2 + let alice_lockup = make_pox_2_extend( + &alice, + 1, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + ); + + latest_block = peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + + // Extend bob's lockup via `stack-extend` for 1 more cycle + let bob_extend = make_pox_2_extend( + &bob, + 1, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 1, + ); + + latest_block = peer.tenure_with_txs(&[bob_extend], &mut coinbase_nonce); + + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + + // produce blocks until the v2 reward cycles start + let height_target = burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + } + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + v2_rewards_checks(latest_block, &mut peer); + + // Roll to Epoch-2.4 and re-do the above tests + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance 
should be locked, and so should bob's + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // this block should unlock alice and bob's balance + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_account = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + let bob_account = get_stx_account_at(&mut peer, &latest_block, &bob_principal); + assert_eq!(alice_account.amount_locked(), 0); + assert_eq!(alice_account.amount_unlocked(), INITIAL_BALANCE); + assert_eq!(bob_account.amount_locked(), 0); + assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); + + // Roll to Epoch-2.4 and re-do the above stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_3_lockup( + &alice, + 2, + ALICE_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 4, + tip.block_height, + ); + let alice_pox_3_lock_nonce = 2; + let alice_first_pox_3_unlock_height = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + let alice_pox_3_start_burn_height = tip.block_height; + + latest_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for 
cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &latest_block); + let alice_first_v3_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); + + let height_target = burnchain.reward_cycle_to_block_height(alice_first_v3_reward_cycle) + 1; + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &alice_principal); + assert_eq!(alice_balance, 0); + + // advance to the first v3 reward cycle + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let bob_lockup = make_pox_3_lockup( + &bob, + 2, + BOB_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 3, + tip.block_height, + ); + + // Alice can stack-extend in PoX v2 + let alice_lockup = make_pox_3_extend( + &alice, + 3, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + ); + + let alice_pox_3_extend_nonce = 3; + let alice_extend_pox_3_unlock_height = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 10) - 1; + + latest_block = peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 1) { + let cycle_start 
= burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + for cycle_number in (first_v3_cycle + 1)..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[1].amount_stacked, ALICE_LOCKUP,); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, BOB_LOCKUP,); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 10) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + 
alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), 4); + assert_eq!(bob_txs.len(), 3); + + for tx in alice_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Alice txs should all have committed okay" + ); + } + + for tx in bob_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Bob txs should all have committed okay" + ); + } + + // Check that the call to `stack-stx` has a well-formed print event. + let stack_tx = &alice_txs + .get(&alice_pox_3_lock_nonce) + .unwrap() + .clone() + .events[0]; + let pox_addr_val = generate_pox_clarity_value("ae1593226f85e49a7eaff5b633ff687695438cc9"); + let stack_op_data = HashMap::from([ + ("lock-amount", Value::UInt(ALICE_LOCKUP)), + ( + "unlock-burn-height", + Value::UInt(alice_first_pox_3_unlock_height.into()), + ), + ( + "start-burn-height", + Value::UInt(alice_pox_3_start_burn_height.into()), + ), + ("pox-addr", pox_addr_val.clone()), + ("lock-period", Value::UInt(4)), + ]); + let common_data = PoxPrintFields { + op_name: "stack-stx".to_string(), + stacker: Value::Principal(alice_principal.clone()), + balance: Value::UInt(10240000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(stack_tx, common_data, stack_op_data); + + // Check that the call to `stack-extend` has a well-formed print event. 
+ let stack_extend_tx = &alice_txs + .get(&alice_pox_3_extend_nonce) + .unwrap() + .clone() + .events[0]; + let stack_ext_op_data = HashMap::from([ + ("extend-count", Value::UInt(6)), + ("pox-addr", pox_addr_val), + ( + "unlock-burn-height", + Value::UInt(alice_extend_pox_3_unlock_height.into()), + ), + ]); + let common_data = PoxPrintFields { + op_name: "stack-extend".to_string(), + stacker: Value::Principal(alice_principal.clone()), + balance: Value::UInt(0), + locked: Value::UInt(ALICE_LOCKUP), + burnchain_unlock_height: Value::UInt(alice_first_pox_3_unlock_height.into()), + }; + check_pox_print_event(stack_extend_tx, common_data, stack_ext_op_data); +} + +#[test] +fn delegate_extend_transition_pox_3() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3_delegate_extend_transition_pox_2", + 7114, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + + let mut coinbase_nonce = 0; + + let INITIAL_BALANCE = 1024 * POX_THRESHOLD_STEPS_USTX; + let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; + + // our "tenure counter" is now at 0 + let tip = 
get_tip(peer.sortdb.as_ref()); + assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. + // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let mut alice_nonce = 0; + let mut bob_nonce = 0; + let mut charlie_nonce = 0; + + let bob_delegate_tx = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stx", + vec![ + Value::UInt(2048 * POX_THRESHOLD_STEPS_USTX), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::none(), + ], + ); + bob_nonce += 1; + + let alice_delegate_tx = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(2048 * POX_THRESHOLD_STEPS_USTX), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::none(), + ], + ); + alice_nonce += 1; + + let delegate_stack_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-stx", + vec![ + PrincipalData::from(bob_address.clone()).into(), + Value::UInt(LOCKUP_AMT), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(tip.block_height as u128), + Value::UInt(3), + ], + ); + let delegate_stack_stx_nonce = charlie_nonce; + let delegate_stack_stx_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) - 1; + let delegate_stack_stx_lock_ht = tip.block_height; + charlie_nonce += 1; + + let delegate_alice_stack_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-stx", + vec![ + PrincipalData::from(alice_address.clone()).into(), + 
Value::UInt(LOCKUP_AMT), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + charlie_nonce += 1; + + // Charlie agg commits the first 3 cycles, but wait until delegate-extended bob to + // agg commit the 4th cycle + // aggregate commit to each cycle delegate-stack-stx locked for (cycles 6, 7, 8, 9) + let agg_commit_txs = [0, 1, 2].map(|ix| { + let tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + ix), + ], + ); + charlie_nonce += 1; + tx + }); + let mut txs = vec![ + bob_delegate_tx, + alice_delegate_tx, + delegate_stack_tx, + delegate_alice_stack_tx, + ]; + + txs.extend(agg_commit_txs); + + latest_block = peer.tenure_with_txs(txs.as_slice(), &mut coinbase_nonce); + + for cycle_number in first_v3_cycle..(first_v3_cycle + 3) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&charlie).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, 2 * LOCKUP_AMT); + } + + for cycle_number in (first_v3_cycle + 3)..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 0); + } + + let alice_principal = alice_address.clone().into(); + let bob_principal = bob_address.clone().into(); + let charlie_principal: PrincipalData = charlie_address.clone().into(); + + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. 
+ } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(bob_lock_period, 3); + + // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle + let delegate_extend_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-extend", + vec![ + PrincipalData::from(bob_address.clone()).into(), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(1), + ], + ); + let delegate_stack_extend_nonce = charlie_nonce; + let delegate_stack_extend_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + charlie_nonce += 1; + + let agg_commit_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + 3), + ], + ); + let stack_agg_nonce = charlie_nonce; + let stack_agg_cycle = first_v3_cycle + 3; + let delegate_stack_extend_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + charlie_nonce += 1; + + latest_block = peer.tenure_with_txs(&[delegate_extend_tx, agg_commit_tx], &mut coinbase_nonce); + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. 
+ } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(bob_lock_period, 4); + + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&charlie).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, 2 * LOCKUP_AMT); + } + + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle + // so that we can check the first-reward-cycle is correctly updated + let delegate_extend_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-extend", + vec![ + PrincipalData::from(bob_address.clone()).into(), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(3), + ], + ); + charlie_nonce += 1; + + latest_block = 
peer.tenure_with_txs(&[delegate_extend_tx], &mut coinbase_nonce); + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(bob_lock_period, 7); + + // now let's check some tx receipts + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + charlie_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), alice_nonce as usize); + assert_eq!(bob_txs.len(), bob_nonce as usize); + assert_eq!(charlie_txs.len(), charlie_nonce as usize); + + for tx in alice_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Alice txs should all have committed okay" + ); + } + for tx in bob_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + 
"Bob txs should all have committed okay" + ); + } + for tx in charlie_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Charlie txs should all have committed okay" + ); + } + + // Check that the call to `delegate-stack-stx` has a well-formed print event. + let delegate_stack_tx = &charlie_txs + .get(&delegate_stack_stx_nonce) + .unwrap() + .clone() + .events[0]; + let pox_addr_val = generate_pox_clarity_value("12d93ae7b61e5b7d905c85828d4320e7c221f433"); + let delegate_op_data = HashMap::from([ + ("lock-amount", Value::UInt(LOCKUP_AMT)), + ( + "unlock-burn-height", + Value::UInt(delegate_stack_stx_unlock_ht.into()), + ), + ( + "start-burn-height", + Value::UInt(delegate_stack_stx_lock_ht.into()), + ), + ("pox-addr", pox_addr_val.clone()), + ("lock-period", Value::UInt(3)), + ("delegator", Value::Principal(charlie_principal.clone())), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-stx".to_string(), + stacker: Value::Principal(bob_principal.clone()), + balance: Value::UInt(LOCKUP_AMT), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(delegate_stack_tx, common_data, delegate_op_data); + + // Check that the call to `delegate-stack-extend` has a well-formed print event. 
+ let delegate_stack_extend_tx = &charlie_txs + .get(&delegate_stack_extend_nonce) + .unwrap() + .clone() + .events[0]; + let delegate_ext_op_data = HashMap::from([ + ("pox-addr", pox_addr_val.clone()), + ( + "unlock-burn-height", + Value::UInt(delegate_stack_extend_unlock_ht.into()), + ), + ("extend-count", Value::UInt(1)), + ("delegator", Value::Principal(charlie_principal.clone())), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-extend".to_string(), + stacker: Value::Principal(bob_principal.clone()), + balance: Value::UInt(0), + locked: Value::UInt(LOCKUP_AMT), + burnchain_unlock_height: Value::UInt(delegate_stack_stx_unlock_ht.into()), + }; + check_pox_print_event(delegate_stack_extend_tx, common_data, delegate_ext_op_data); + + // Check that the call to `stack-aggregation-commit` has a well-formed print event. + let stack_agg_commit_tx = &charlie_txs.get(&stack_agg_nonce).unwrap().clone().events[0]; + let stack_agg_commit_op_data = HashMap::from([ + ("pox-addr", pox_addr_val), + ("reward-cycle", Value::UInt(stack_agg_cycle.into())), + ("amount-ustx", Value::UInt(2 * LOCKUP_AMT)), + ]); + let common_data = PoxPrintFields { + op_name: "stack-aggregation-commit".to_string(), + stacker: Value::Principal(charlie_principal.clone()), + balance: Value::UInt(LOCKUP_AMT), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(stack_agg_commit_tx, common_data, stack_agg_commit_op_data); +} From 2dc1a5547289df6a0b3f9442b7ee55776327f88c Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Wed, 3 May 2023 22:21:33 +0200 Subject: [PATCH 123/158] feat: allow epoch2.2 through epoch2.4 to be configured in regtest mode (AKA neon/krypton) --- src/core/mod.rs | 36 ++++++++++++++++++++++++---- testnet/stacks-node/src/config.rs | 39 +++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 04c498fa99..d1b9192b53 100644 --- a/src/core/mod.rs 
+++ b/src/core/mod.rs @@ -297,7 +297,7 @@ lazy_static! { } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 6] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -336,15 +336,22 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch23, start_height: BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_3 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -369,10 +376,31 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: 2000, - end_height: STACKS_EPOCH_MAX, + end_height: 3000, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 3000, + end_height: 4000, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 4000, + end_height: 5000, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 5000, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 8bb7e167c1..7ab3fdab1e 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -408,6 +408,33 @@ impl Config { burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } + if let Some(epochs) = &self.burnchain.epochs { + // Iterate through the epochs vector and find the item where epoch_id == StacksEpochId::Epoch23 + if let Some(epoch) = epochs + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch23) + { + // Override v2_unlock_height to the start_height of epoch2.3 + debug!( + "Override v2_unlock_height from {} to {}", + burnchain.pox_constants.v2_unlock_height, epoch.start_height + ); + burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32; + } + + if let Some(epoch) = epochs + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch24) + { + // Override pox_3_activation_height to the start_height of epoch2.4 + debug!( + "Override pox_3_activation_height from {} to {}", + burnchain.pox_constants.pox_3_activation_height, epoch.start_height + ); + burnchain.pox_constants.pox_3_activation_height = epoch.start_height as u32; + } + } + if let 
Some(sunset_start) = self.burnchain.sunset_start { debug!( "Override sunset_start from {} to {}", @@ -507,6 +534,12 @@ impl Config { Ok(StacksEpochId::Epoch2_05) } else if epoch_name == EPOCH_CONFIG_2_1_0 { Ok(StacksEpochId::Epoch21) + } else if epoch_name == EPOCH_CONFIG_2_2_0 { + Ok(StacksEpochId::Epoch22) + } else if epoch_name == EPOCH_CONFIG_2_3_0 { + Ok(StacksEpochId::Epoch23) + } else if epoch_name == EPOCH_CONFIG_2_4_0 { + Ok(StacksEpochId::Epoch24) } else { Err(format!("Unknown epoch name specified: {}", epoch_name)) }?; @@ -529,6 +562,9 @@ impl Config { StacksEpochId::Epoch20, StacksEpochId::Epoch2_05, StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, ]; for (expected_epoch, configured_epoch) in expected_list .iter() @@ -1396,6 +1432,9 @@ pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; #[derive(Clone, Deserialize, Default, Debug)] pub struct BurnchainConfigFile { From 4d98d56920fcf926ff60402d8fd88e9f5328126c Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Wed, 3 May 2023 22:40:59 +0200 Subject: [PATCH 124/158] fix: v2_unlock_height should be Epoch2.2 start_height+1 --- testnet/stacks-node/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 7ab3fdab1e..fa165ae47c 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -412,14 +412,14 @@ impl Config { // Iterate through the epochs vector and find the item where epoch_id == StacksEpochId::Epoch23 if let Some(epoch) = epochs .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch23) + 
.find(|epoch| epoch.epoch_id == StacksEpochId::Epoch22) { // Override v2_unlock_height to the start_height of epoch2.3 debug!( "Override v2_unlock_height from {} to {}", burnchain.pox_constants.v2_unlock_height, epoch.start_height ); - burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32; + burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32 + 1; } if let Some(epoch) = epochs From 71d1ed25e673b0b55cf44ceae418cf884c186539 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 4 May 2023 17:27:21 +0200 Subject: [PATCH 125/158] chore: fix debug message for v2_unlock_height override --- testnet/stacks-node/src/config.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index fa165ae47c..368d9b5f9d 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -417,7 +417,8 @@ impl Config { // Override v2_unlock_height to the start_height of epoch2.3 debug!( "Override v2_unlock_height from {} to {}", - burnchain.pox_constants.v2_unlock_height, epoch.start_height + burnchain.pox_constants.v2_unlock_height, + epoch.start_height + 1 ); burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32 + 1; } From f4a712a98e36ca6fbb8a699c07535ea266b6ad18 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 5 May 2023 14:51:06 -0400 Subject: [PATCH 126/158] chore: fix typos --- testnet/stacks-node/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 368d9b5f9d..c7b47f2aad 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -409,12 +409,12 @@ impl Config { } if let Some(epochs) = &self.burnchain.epochs { - // Iterate through the epochs vector and find the item where epoch_id == StacksEpochId::Epoch23 + // Iterate through the epochs vector and find the item where epoch_id == 
StacksEpochId::Epoch22 if let Some(epoch) = epochs .iter() .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch22) { - // Override v2_unlock_height to the start_height of epoch2.3 + // Override v2_unlock_height to the start_height of epoch2.2 debug!( "Override v2_unlock_height from {} to {}", burnchain.pox_constants.v2_unlock_height, From eacbf98fec625a31e429af6d6439f65870720b3f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 14:21:42 -0500 Subject: [PATCH 127/158] test: add segwit, pox-address get-burn-info validation tests --- src/chainstate/stacks/boot/pox_3_tests.rs | 761 +++++++++++++++++++++- 1 file changed, 759 insertions(+), 2 deletions(-) diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 43580dcdb1..c6e45da86b 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -2541,7 +2541,7 @@ fn pox_extend_transition() { } #[test] -fn delegate_extend_transition_pox_3() { +fn delegate_extend_pox_3() { // the sim environment produces 25 empty sortitions before // tenures start being tracked. let EMPTY_SORTITIONS = 25; @@ -2563,7 +2563,7 @@ fn delegate_extend_transition_pox_3() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_delegate_extend_transition_pox_2", + "pox_3_delegate_extend", 7114, Some(epochs.clone()), Some(&observer), @@ -3027,3 +3027,760 @@ fn delegate_extend_transition_pox_3() { }; check_pox_print_event(stack_agg_commit_tx, common_data, stack_agg_commit_op_data); } + +#[test] +fn pox_3_getters() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3_getters", + 7115, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + let danielle = keys.pop().unwrap(); + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + let mut coinbase_nonce = 0; + + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; + + // alice locks in v2 + let alice_lockup = make_pox_3_lockup( + &alice, + 0, + LOCKUP_AMT, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 4, + tip.block_height, + ); + + // bob deleates to charlie + let bob_delegate_tx = make_pox_3_contract_call( + &bob, + 0, + "delegate-stx", + vec![ + Value::UInt(LOCKUP_AMT), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::none(), + ], + ); + + // charlie calls delegate-stack-stx for bob + let charlie_delegate_stack_tx = make_pox_3_contract_call( + 
&charlie, + 0, + "delegate-stack-stx", + vec![ + PrincipalData::from(bob_address.clone()).into(), + Value::UInt(LOCKUP_AMT), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(tip.block_height as u128), + Value::UInt(4), + ], + ); + + let agg_commit_tx_1 = make_pox_3_contract_call( + &charlie, + 1, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128), + ], + ); + + let agg_commit_tx_2 = make_pox_3_contract_call( + &charlie, + 2, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + 1), + ], + ); + + let agg_commit_tx_3 = make_pox_3_contract_call( + &charlie, + 3, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + 2), + ], + ); + + let reject_pox = make_pox_3_contract_call(&danielle, 0, "reject-pox", vec![]); + + peer.tenure_with_txs( + &[ + alice_lockup, + bob_delegate_tx, + charlie_delegate_stack_tx, + agg_commit_tx_1, + agg_commit_tx_2, + agg_commit_tx_3, + reject_pox, + ], + &mut coinbase_nonce, + ); + + let result = eval_at_tip(&mut peer, "pox-3", &format!(" + {{ + ;; should be none + get-delegation-info-alice: (get-delegation-info '{}), + ;; should be (some $charlie_address) + get-delegation-info-bob: (get-delegation-info '{}), + ;; should be none + get-allowance-contract-callers: (get-allowance-contract-callers '{} '{}), + ;; should be 1 + get-num-reward-set-pox-addresses-current: (get-num-reward-set-pox-addresses u{}), + ;; should be 0 + get-num-reward-set-pox-addresses-future: (get-num-reward-set-pox-addresses u1000), + ;; should be 0 + get-partial-stacked-by-cycle-bob-0: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + 
get-partial-stacked-by-cycle-bob-1: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + get-partial-stacked-by-cycle-bob-2: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + ;; should be LOCKUP_AMT + get-partial-stacked-by-cycle-bob-3: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + ;; should be LOCKUP_AMT + get-total-pox-rejection-now: (get-total-pox-rejection u{}), + ;; should be 0 + get-total-pox-rejection-next: (get-total-pox-rejection u{}), + ;; should be 0 + get-total-pox-rejection-future: (get-total-pox-rejection u{}) + }}", &alice_address, + &bob_address, + &bob_address, &format!("{}.hello-world", &charlie_address), first_v3_cycle + 1, + &charlie_address.bytes, first_v3_cycle + 0, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 1, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 2, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 3, &charlie_address, + first_v3_cycle, + first_v3_cycle + 1, + first_v3_cycle + 2, + )); + + eprintln!("{}", &result); + let data = result.expect_tuple().data_map; + + let alice_delegation_info = data + .get("get-delegation-info-alice") + .cloned() + .unwrap() + .expect_optional(); + assert!(alice_delegation_info.is_none()); + + let bob_delegation_info = data + .get("get-delegation-info-bob") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map; + let bob_delegation_addr = bob_delegation_info + .get("delegated-to") + .cloned() + .unwrap() + .expect_principal(); + let bob_delegation_amt = bob_delegation_info + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128(); + let bob_pox_addr_opt = bob_delegation_info + .get("pox-addr") + .cloned() + .unwrap() + .expect_optional(); + assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); + assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert!(bob_pox_addr_opt.is_none()); + + let allowance = data + 
.get("get-allowance-contract-callers") + .cloned() + .unwrap() + .expect_optional(); + assert!(allowance.is_none()); + + let current_num_reward_addrs = data + .get("get-num-reward-set-pox-addresses-current") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(current_num_reward_addrs, 2); + + let future_num_reward_addrs = data + .get("get-num-reward-set-pox-addresses-future") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(future_num_reward_addrs, 0); + + for i in 0..3 { + let key = + ClarityName::try_from(format!("get-partial-stacked-by-cycle-bob-{}", &i)).unwrap(); + let partial_stacked = data.get(&key).cloned().unwrap().expect_optional(); + assert!(partial_stacked.is_none()); + } + let partial_stacked = data + .get("get-partial-stacked-by-cycle-bob-3") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map + .get("stacked-amount") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(partial_stacked, LOCKUP_AMT as u128); + + let rejected = data + .get("get-total-pox-rejection-now") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(rejected, LOCKUP_AMT as u128); + + let rejected = data + .get("get-total-pox-rejection-next") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(rejected, 0); + + let rejected = data + .get("get-total-pox-rejection-future") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(rejected, 0); +} + +fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec, u128) { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| { + let addrs = chainstate + .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| { + clarity_tx + .with_readonly_clarity_env( + false, + 0x80000000, + ClarityVersion::Clarity2, + PrincipalData::Standard(StandardPrincipalData::transient()), + None, + 
LimitedCostTracker::new_free(), + |env| { + env.eval_read_only( + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", &burn_height), + ) + }, + ) + .unwrap() + }) + .unwrap(); + addrs + }) + .unwrap() + .expect_optional() + .expect("FATAL: expected list") + .expect_tuple(); + + let addrs = addrs_and_payout + .get("addrs") + .unwrap() + .to_owned() + .expect_list() + .into_iter() + .map(|tuple| PoxAddress::try_from_pox_tuple(false, &tuple).unwrap()) + .collect(); + + let payout = addrs_and_payout + .get("payout") + .unwrap() + .to_owned() + .expect_u128(); + (addrs, payout) +} + +#[test] +fn get_pox_addrs() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3_tests::get_pox_addrs", + 7102, + Some(epochs.clone()), + None, + ); + + assert_eq!(burnchain.pox_constants.reward_slots(), 6); + let mut coinbase_nonce = 0; + + let assert_latest_was_burn = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + 
assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(commit.all_outputs_burn()); + assert!(commit.burn_fee > 0); + + let (addrs, payout) = get_burn_pox_addr_info(peer); + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + info!("Checking burn outputs at burn_height = {}", burn_height); + if peer.config.burnchain.is_in_prepare_phase(burn_height) { + assert_eq!(addrs.len(), 1); + assert_eq!(payout, 1000); + assert!(addrs[0].is_burn()); + } else { + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(addrs[0].is_burn()); + assert!(addrs[1].is_burn()); + } + }; + + let assert_latest_was_pox = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(!commit.all_outputs_burn()); + let commit_addrs = commit.commit_outs; + + let (addrs, payout) = get_burn_pox_addr_info(peer); + info!( + "Checking pox outputs at burn_height = {}, commit_addrs = {:?}, fetch_addrs = {:?}", + burn_height, commit_addrs, addrs + ); + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(commit_addrs.contains(&addrs[0])); + assert!(commit_addrs.contains(&addrs[1])); + addrs + }; + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + + let 
mut txs = vec![]; + let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let stackers: Vec<_> = keys + .iter() + .zip([ + AddressHashMode::SerializeP2PKH, + AddressHashMode::SerializeP2SH, + AddressHashMode::SerializeP2WPKH, + AddressHashMode::SerializeP2WSH, + ]) + .map(|(key, hash_mode)| { + let pox_addr = PoxAddress::from_legacy(hash_mode, key_to_stacks_addr(key).bytes); + txs.push(make_pox_3_lockup( + key, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr.clone(), + 2, + tip_height, + )); + pox_addr + }) + .collect(); + + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + + let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); + // produce blocks until the first reward phase that everyone should be in + while get_tip(peer.sortdb.as_ref()).block_height < target_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let reward_blocks = + burnchain.pox_constants.reward_cycle_length - burnchain.pox_constants.prepare_length; + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we 
should be in the reward phase, produce the reward blocks + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we're in the next reward cycle, but everyone is unstacked + for _i in 0..burnchain.pox_constants.reward_cycle_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } +} + +#[test] +fn stack_with_segwit() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3_tests::stack_with_segwit", + 7120, + Some(epochs.clone()), + None, + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + assert_eq!(burnchain.pox_constants.reward_slots(), 6); + let mut coinbase_nonce = 0; + + let assert_latest_was_burn = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(commit.all_outputs_burn()); + assert!(commit.burn_fee > 0); + + let (addrs, payout) = get_burn_pox_addr_info(peer); + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + info!("Checking burn outputs at burn_height = {}", burn_height); + if peer.config.burnchain.is_in_prepare_phase(burn_height) { + assert_eq!(addrs.len(), 1); + assert_eq!(payout, 1000); + assert!(addrs[0].is_burn()); + } else { + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(addrs[0].is_burn()); 
+ assert!(addrs[1].is_burn()); + } + }; + + let assert_latest_was_pox = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(!commit.all_outputs_burn()); + let commit_addrs = commit.commit_outs; + + let (addrs, payout) = get_burn_pox_addr_info(peer); + info!( + "Checking pox outputs at burn_height = {}, commit_addrs = {:?}, fetch_addrs = {:?}", + burn_height, commit_addrs, addrs + ); + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(commit_addrs.contains(&addrs[0])); + assert!(commit_addrs.contains(&addrs[1])); + addrs + }; + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + + let mut txs = vec![]; + let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let stackers: Vec<_> = keys + .iter() + .zip([ + PoxAddress::Addr20(false, PoxAddressType20::P2WPKH, [0x01; 20]), + PoxAddress::Addr32(false, PoxAddressType32::P2WSH, [0x02; 32]), + PoxAddress::Addr32(false, PoxAddressType32::P2TR, [0x03; 32]), + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, Hash160([0x04; 20])), + ]) + .map(|(key, pox_addr)| { + txs.push(make_pox_3_lockup( + key, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr.clone(), + 2, + tip_height, + )); + pox_addr + }) + .collect(); + + let mut latest_block = peer.tenure_with_txs(&txs, &mut 
coinbase_nonce); + assert_latest_was_burn(&mut peer); + + let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); + // produce blocks until the first reward phase that everyone should be in + while get_tip(peer.sortdb.as_ref()).block_height < target_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let reward_blocks = + burnchain.pox_constants.reward_cycle_length - burnchain.pox_constants.prepare_length; + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + 
rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we're in the next reward cycle, but everyone is unstacked + for _i in 0..burnchain.pox_constants.reward_cycle_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } +} From ab3e85449798f6cebd3f1fd12cee7a722dc27c57 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 14:46:46 -0500 Subject: [PATCH 128/158] tests: finish porting pox_2_tests to pox_3 --- src/chainstate/stacks/boot/pox_3_tests.rs | 616 +++++++++++++++++++++- 1 file changed, 612 insertions(+), 4 deletions(-) diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index c6e45da86b..b78e563fcd 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -2596,10 +2596,8 @@ fn delegate_extend_pox_3() { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - // in the next tenure, PoX 2 should now exist. - // Lets have Bob lock up for v2 - // this will lock for cycles 8, 9, 10 - // the first v2 cycle will be 8 + // in the next tenure, PoX 3 should now exist. + // charlie will lock bob and alice through the delegation interface let tip = get_tip(peer.sortdb.as_ref()); let mut alice_nonce = 0; @@ -3784,3 +3782,613 @@ fn stack_with_segwit() { assert_latest_was_burn(&mut peer); } } + +/// In this test case, Alice delegates to Bob. +/// Bob stacks Alice's funds via PoX v2 for 6 cycles. In the third cycle, +/// Bob increases Alice's stacking amount by less than the stacking min. +/// Bob is able to increase the pool's aggregate amount anyway. 
+/// +#[test] +fn stack_aggregation_increase() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3::stack_aggregation_increase", + 7117, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob = keys.pop().unwrap(); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + let bob_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, bob_address.bytes.clone()); + let charlie = keys.pop().unwrap(); + let charlie_address = key_to_stacks_addr(&charlie); + let charlie_pox_addr = make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ); + let dan = keys.pop().unwrap(); + let dan_address = key_to_stacks_addr(&dan); + let dan_principal = PrincipalData::from(dan_address.clone()); + let dan_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, dan_address.bytes.clone()); + let alice_nonce = 0; + let mut bob_nonce = 0; + let mut charlie_nonce = 0; + let mut dan_nonce = 0; + + let alice_first_lock_amount = 512 * POX_THRESHOLD_STEPS_USTX; + let alice_delegation_amount = alice_first_lock_amount + 1; + let dan_delegation_amount = alice_first_lock_amount + 1; + let 
dan_stack_amount = 511 * POX_THRESHOLD_STEPS_USTX; + + let mut coinbase_nonce = 0; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx for alice + let alice_delegation_1 = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + + // bob locks some of alice's tokens + let delegate_stack_tx_bob = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + bob_nonce += 1; + + // dan stacks some tokens + let stack_tx_dan = make_pox_3_lockup( + &dan, + dan_nonce, + dan_stack_amount, + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, dan_address.bytes.clone()), + 12, + tip.block_height, + ); + dan_nonce += 1; + + latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx_bob, stack_tx_dan], + &mut coinbase_nonce, + ); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 3rd reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; + + 
while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let expected_alice_unlock = burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let expected_dan_unlock = burnchain.reward_cycle_to_block_height(first_v3_cycle + 12) - 1; + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_alice_unlock); + + let dan_bal = get_stx_account_at(&mut peer, &latest_block, &dan_principal); + assert_eq!(dan_bal.amount_locked(), dan_stack_amount); + assert_eq!(dan_bal.unlock_height(), expected_dan_unlock); + + // check that the partial stacking state still contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + let mut txs_to_submit = vec![]; + + // bob locks in alice's tokens to a PoX address, + // which clears the partially-stacked state + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-commit-indexed", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + ], + )); + let bob_stack_aggregation_commit_indexed = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens in a reward cycle that's already committed (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(0), + ], + )); + let 
bob_err_stacking_no_such_principal = bob_nonce; + bob_nonce += 1; + + // bob locks up 1 more of alice's tokens + // takes effect in the _next_ reward cycle + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(1), + ], + )); + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // locked up more tokens, but unlock height is unchanged + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_delegation_amount); + assert_eq!(alice_bal.unlock_height(), expected_alice_unlock); + + // only 1 uSTX to lock in this next cycle for Alice + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cur_reward_cycle + 1, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 1); + + for cycle_number in (cur_reward_cycle + 2)..(first_v3_cycle + 6) { + // alice has 512 * POX_THRESHOLD_STEPS_USTX partially-stacked STX in all cycles after + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount); + } + + let mut txs_to_submit = vec![]; + + // charlie tries to lock alice's additional tokens to his own PoX address (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-increase", + vec![ + charlie_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let charlie_err_stacking_no_principal = charlie_nonce; + charlie_nonce += 1; + + // charlie tries to lock alice's additional tokens to bob's PoX 
address (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let charlie_err_stacking_no_principal_2 = charlie_nonce; + charlie_nonce += 1; + + // bob tries to retcon a reward cycle lockup (should fail with ERR_STACKING_INVALID_LOCK_PERIOD) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let bob_err_stacking_invalid_lock_period = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens in a reward cycle that has no tokens stacked in it yet (should + // fail with ERR_DELEGATION_NO_REWARD_CYCLE) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 13) as u128), + Value::UInt(0), + ], + )); + let bob_err_delegation_no_reward_cycle = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens to a non-existant PoX reward address (should fail with + // ERR_DELEGATION_NO_REWARD_SLOT) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(2), + ], + )); + let bob_err_delegation_no_reward_slot = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens to the wrong PoX address (should fail with ERR_DELEGATION_WRONG_REWARD_SLOT). + // slot 0 belongs to dan. 
+ txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(0), + ], + )); + let bob_err_delegation_wrong_reward_slot = bob_nonce; + bob_nonce += 1; + + // bob locks tokens for Alice (bob's previous stack-aggregation-commit put his PoX address in + // slot 1 for this reward cycle) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(1), + ], + )); + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + charlie_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), 1); + assert_eq!(bob_txs.len(), 9); + assert_eq!(charlie_txs.len(), 2); + + // bob's stack-aggregation-commit-indexed succeeded and returned the right index + assert_eq!( + &bob_txs[&bob_stack_aggregation_commit_indexed] + .result + .to_string(), + "(ok u1)" + ); + + // check bob's errors + assert_eq!( + &bob_txs[&bob_err_stacking_no_such_principal] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + 
&bob_txs[&bob_err_stacking_invalid_lock_period] + .result + .to_string(), + "(err 2)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_no_reward_cycle] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_no_reward_slot] + .result + .to_string(), + "(err 28)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_wrong_reward_slot] + .result + .to_string(), + "(err 29)" + ); + + // check charlie's errors + assert_eq!( + &charlie_txs[&charlie_err_stacking_no_principal] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + &charlie_txs[&charlie_err_stacking_no_principal_2] + .result + .to_string(), + "(err 4)" + ); +} + +/// Verify that delegate-stx validates the PoX addr, if given +#[test] +fn pox_3_delegate_stx_addr_validation() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + "pox_3::delegate_stx_addr", + 7100, + Some(epochs.clone()), + None, + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let mut coinbase_nonce = 0; + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + let danielle = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut 
coinbase_nonce); + + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // alice delegates to charlie in v3 to a valid address + let alice_delegation = make_pox_3_contract_call( + &alice, + 0, + "delegate-stx", + vec![ + Value::UInt(LOCKUP_AMT), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::some(make_pox_addr( + AddressHashMode::SerializeP2PKH, + alice_address.bytes.clone(), + )) + .unwrap(), + ], + ); + + let bob_bad_pox_addr = Value::Tuple( + TupleData::from_data(vec![ + ( + ClarityName::try_from("version".to_owned()).unwrap(), + Value::buff_from_byte(0xff), + ), + ( + ClarityName::try_from("hashbytes".to_owned()).unwrap(), + Value::Sequence(SequenceData::Buffer(BuffData { + data: bob_address.bytes.as_bytes().to_vec(), + })), + ), + ]) + .unwrap(), + ); + + // bob delegates to charlie in v3 with an invalid address + let bob_delegation = make_pox_3_contract_call( + &bob, + 0, + "delegate-stx", + vec![ + Value::UInt(LOCKUP_AMT), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::some(bob_bad_pox_addr).unwrap(), + ], + ); + + peer.tenure_with_txs(&[alice_delegation, bob_delegation], &mut coinbase_nonce); + + let result = eval_at_tip( + &mut peer, + "pox-3", + &format!( + " + {{ + ;; should be (some $charlie_address) + get-delegation-info-alice: (get-delegation-info '{}), + ;; should be none + get-delegation-info-bob: (get-delegation-info '{}), + }}", + &alice_address, &bob_address, + ), + ); + + eprintln!("{}", &result); + let data = result.expect_tuple().data_map; + + // bob had an invalid PoX address + let bob_delegation_info = data + .get("get-delegation-info-bob") + .cloned() + .unwrap() + 
.expect_optional(); + assert!(bob_delegation_info.is_none()); + + // alice was valid + let alice_delegation_info = data + .get("get-delegation-info-alice") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map; + let alice_delegation_addr = alice_delegation_info + .get("delegated-to") + .cloned() + .unwrap() + .expect_principal(); + let alice_delegation_amt = alice_delegation_info + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128(); + let alice_pox_addr_opt = alice_delegation_info + .get("pox-addr") + .cloned() + .unwrap() + .expect_optional(); + assert_eq!( + alice_delegation_addr, + charlie_address.to_account_principal() + ); + assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert!(alice_pox_addr_opt.is_some()); + + let alice_pox_addr = alice_pox_addr_opt.unwrap(); + + assert_eq!( + alice_pox_addr, + make_pox_addr(AddressHashMode::SerializeP2PKH, alice_address.bytes.clone(),) + ); +} From f568ce2303de6adb485be92033e4260fb57211b6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 5 May 2023 16:11:47 -0500 Subject: [PATCH 129/158] update tests with new default regtest epochs --- testnet/stacks-node/src/tests/epoch_22.rs | 32 +++++++---------------- testnet/stacks-node/src/tests/epoch_23.rs | 21 ++++----------- 2 files changed, 14 insertions(+), 39 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index d0e89244b9..2a42c7f083 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -7,7 +7,6 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::signal_mining_blocked; use stacks::chainstate::stacks::miner::signal_mining_ready; -use stacks::core::PEER_VERSION_EPOCH_2_2; use stacks::core::STACKS_EPOCH_MAX; use stacks::types::chainstate::StacksAddress; use stacks::types::PrivateKey; @@ -163,13 +162,9 @@ fn 
disable_pox() { epochs[2].end_height = epoch_2_1; epochs[3].start_height = epoch_2_1; epochs[3].end_height = epoch_2_2; - epochs.push(StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: epoch_2_2, - end_height: STACKS_EPOCH_MAX, - block_limit: epochs[3].block_limit.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }); + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -697,13 +692,9 @@ fn pox_2_unlock_all() { epochs[2].end_height = epoch_2_1; epochs[3].start_height = epoch_2_1; epochs[3].end_height = epoch_2_2; - epochs.push(StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: epoch_2_2, - end_height: STACKS_EPOCH_MAX, - block_limit: epochs[3].block_limit.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }); + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -1298,14 +1289,9 @@ fn test_pox_reorg_one_flap() { epochs[2].end_height = 151; epochs[3].start_height = 151; epochs[3].end_height = epoch_2_2; - epochs.push(StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: epoch_2_2, - end_height: STACKS_EPOCH_MAX, - block_limit: epochs[3].block_limit.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }); - + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); conf_template.burnchain.epochs = Some(epochs); let privks: Vec<_> = (0..5) diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 130b094b05..58313947d8 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,8 +18,6 @@ use std::env; use std::thread; use stacks::burnchains::Burnchain; -use 
stacks::core::PEER_VERSION_EPOCH_2_2; -use stacks::core::PEER_VERSION_EPOCH_2_3; use stacks::core::STACKS_EPOCH_MAX; use stacks::vm::types::QualifiedContractIdentifier; @@ -124,20 +122,11 @@ fn trait_invocation_behavior() { epochs[2].end_height = epoch_2_1; epochs[3].start_height = epoch_2_1; epochs[3].end_height = epoch_2_2; - epochs.push(StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: epoch_2_2, - end_height: epoch_2_3, - block_limit: epochs[3].block_limit.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }); - epochs.push(StacksEpoch { - epoch_id: StacksEpochId::Epoch23, - start_height: epoch_2_3, - end_height: STACKS_EPOCH_MAX, - block_limit: epochs[3].block_limit.clone(), - network_epoch: PEER_VERSION_EPOCH_2_3, - }); + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = STACKS_EPOCH_MAX; + epochs.truncate(6); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); From fe85271002912f770e50b36a06c10f89d909f509 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Wed, 3 May 2023 21:04:30 +0200 Subject: [PATCH 130/158] feat: add `pox-3` to contract list in `/v2/pox` RPC response --- src/net/rpc.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 10543820fa..51bec323fb 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -117,7 +117,7 @@ use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use stacks_common::util::hash::{hex_bytes, to_hex}; -use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME}; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; use crate::chainstate::stacks::StacksBlockHeader; use crate::clarity_vm::database::marf::MarfedKV; use stacks_common::types::chainstate::BlockHeaderHash; @@ -317,6 +317,12 @@ impl RPCPoxInfoData { ))? 
+ 1; + let pox_3_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .ok_or(net_error::ChainstateError( + "PoX-3 first reward cycle begins before first burn block height".to_string(), + ))?; + let data = chainstate .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( @@ -520,6 +526,14 @@ impl RPCPoxInfoData { as u64, first_reward_cycle_id: pox_2_first_cycle, }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_3_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_3_activation_height + as u64, + first_reward_cycle_id: pox_3_first_cycle, + }, ], }) } From 22baac43b18ffbc6b2a40e31af300708aaa1e2d2 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 4 May 2023 17:18:36 +0200 Subject: [PATCH 131/158] chore: PR feedback (pox_3_first_cycle off-by-one) --- src/net/rpc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 51bec323fb..7f967f1b71 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -308,7 +308,8 @@ impl RPCPoxInfoData { .block_height_to_reward_cycle(burnchain.first_block_height as u64) .ok_or(net_error::ChainstateError( "PoX-1 first reward cycle begins before first burn block height".to_string(), - ))?; + ))? + + 1; let pox_2_first_cycle = burnchain .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) From 7a8961de87f2ce0bcd7641ead9a6a4a8d67fe5f1 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 4 May 2023 17:37:52 +0200 Subject: [PATCH 132/158] Revert "chore: PR feedback (pox_3_first_cycle off-by-one)" This reverts commit 5e6c36ed8076ae5ecf071e2c9a087d18fe8ea517. 
--- src/net/rpc.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 7f967f1b71..51bec323fb 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -308,8 +308,7 @@ impl RPCPoxInfoData { .block_height_to_reward_cycle(burnchain.first_block_height as u64) .ok_or(net_error::ChainstateError( "PoX-1 first reward cycle begins before first burn block height".to_string(), - ))? - + 1; + ))?; let pox_2_first_cycle = burnchain .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) From c5a1be2978b47840dd9982c4df68d81d47754416 Mon Sep 17 00:00:00 2001 From: Matthew Little Date: Thu, 4 May 2023 17:38:37 +0200 Subject: [PATCH 133/158] chore: PR feedback (pox_3_first_cycle off-by-one) --- src/net/rpc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 51bec323fb..628921e51a 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -321,7 +321,8 @@ impl RPCPoxInfoData { .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) .ok_or(net_error::ChainstateError( "PoX-3 first reward cycle begins before first burn block height".to_string(), - ))?; + ))? 
+ + 1; let data = chainstate .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { From cd66a949ca730ffcd7224d82be86ddcb74e120f1 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Thu, 4 May 2023 17:08:24 -0400 Subject: [PATCH 134/158] added epoch2.4 test disable_pox_v2 is failing only on final assertion, not crashing wip, trying to get epoch 2.4 to start right rename test, revert epoch_22 fix_to_pox_contract passes auto unlock test passing added more asserts for auto unlock test --- src/chainstate/stacks/boot/mod.rs | 15 +- src/chainstate/stacks/boot/pox-3.clar | 3 +- testnet/stacks-node/src/tests/epoch_22.rs | 54 +- testnet/stacks-node/src/tests/epoch_24.rs | 1340 +++++++++++++++++ testnet/stacks-node/src/tests/mod.rs | 1 + .../src/tests/neon_integrations.rs | 2 +- 6 files changed, 1384 insertions(+), 31 deletions(-) create mode 100644 testnet/stacks-node/src/tests/epoch_24.rs diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 4219c7f093..351dbbc365 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -35,6 +35,7 @@ use crate::util_lib::strings::VecDisplay; use clarity::codec::StacksMessageCodec; use clarity::types::chainstate::BlockHeaderHash; use clarity::util::hash::to_hex; +use clarity::vm::analysis::CheckErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; use clarity::vm::contexts::ContractContext; @@ -44,6 +45,7 @@ use clarity::vm::costs::{ use clarity::vm::database::ClarityDatabase; use clarity::vm::database::{NULL_BURN_STATE_DB, NULL_HEADER_DB}; use clarity::vm::errors::InterpreterError; +use clarity::vm::errors::Error as VmError; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::representations::ClarityName; use clarity::vm::representations::ContractName; @@ -145,7 +147,7 @@ pub fn make_contract_id(addr: &StacksAddress, name: &str) -> QualifiedContractId ) } -#[derive(Clone)] +#[derive(Clone, Debug)] pub 
struct RawRewardSetEntry { pub reward_address: PoxAddress, pub amount_stacked: u128, @@ -688,6 +690,7 @@ impl StacksChainState { .iter() .fold(0, |agg, entry| agg + entry.amount_stacked); + info!("CALCULATING REWARD SET: checking total ustx"); assert!( participation <= liquid_ustx, "CORRUPTION: More stacking participation than liquid STX" @@ -988,7 +991,7 @@ impl StacksChainState { .pox_constants .active_pox_contract(reward_cycle_start_height); - match pox_contract_name { + let result = match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), x if x == POX_3_NAME => self.get_reward_addresses_pox_3(sortdb, block_id, reward_cycle), @@ -996,6 +999,14 @@ impl StacksChainState { panic!("Blockchain implementation failure: PoX contract name '{}' is unknown. Chainstate is corrupted.", unknown_contract); } + }; + + match result { + Err(Error::ClarityError(ClarityError::Interpreter(VmError::Unchecked(CheckErrors::NoSuchContract(_))))) => { + warn!("Reward cycle attempted to calculate rewards before the PoX contract was instantiated"); + return Ok(vec![]) + } + x => x, } } } diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index d1b2d80651..c2191fd1d2 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -19,6 +19,7 @@ (define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) (define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) (define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) + (define-constant ERR_INVALID_START_BURN_HEIGHT 24) (define-constant ERR_NOT_CURRENT_STACKER 25) (define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) @@ -1313,4 +1314,4 @@ (get amount rejected) u0 ) -) +) \ No newline at end of file diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 2a42c7f083..ab2122844b 100644 --- 
a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -105,7 +105,7 @@ fn disable_pox() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -115,7 +115,7 @@ fn disable_pox() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() @@ -125,7 +125,7 @@ fn disable_pox() { let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_3 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() @@ -231,15 +231,15 @@ fn disable_pox() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -287,8 +287,8 @@ fn disable_pox() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, 1, @@ -398,7 +398,7 @@ fn disable_pox() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); let mut reward_cycle_pox_addrs = HashMap::new(); @@ -566,7 +566,7 @@ fn disable_pox() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), "(err none)"); 
abort_tested = true; } @@ -635,7 +635,7 @@ fn pox_2_unlock_all() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -645,7 +645,7 @@ fn pox_2_unlock_all() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() @@ -655,7 +655,7 @@ fn pox_2_unlock_all() { let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_3 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() @@ -761,15 +761,15 @@ fn pox_2_unlock_all() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -817,8 +817,8 @@ fn pox_2_unlock_all() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_publish( &spender_sk, @@ -1062,7 +1062,7 @@ fn pox_2_unlock_all() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); let mut reward_cycle_pox_addrs = HashMap::new(); @@ -1218,7 +1218,7 @@ fn pox_2_unlock_all() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); 
unlock_ht_22_tested = true; } @@ -1234,7 +1234,7 @@ fn pox_2_unlock_all() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), format!("(ok u{})", 230 + 60)); unlock_ht_21_tested = true; } @@ -1498,7 +1498,7 @@ fn test_pox_reorg_one_flap() { let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey) .to_bytes() @@ -1525,8 +1525,8 @@ fn test_pox_reorg_one_flap() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), ClarityVersion::Clarity1, ) - .unwrap() - .unwrap(), + .unwrap() + .unwrap(), Value::UInt((sort_height + 1) as u128), Value::UInt(12), ], @@ -1677,4 +1677,4 @@ fn test_pox_reorg_one_flap() { let tip_info = get_chain_info(&c); info!("Final tip for miner {}: {:?}", i, &tip_info); } -} +} \ No newline at end of file diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs new file mode 100644 index 0000000000..373df1ab82 --- /dev/null +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -0,0 +1,1340 @@ +// Copyright (C) 2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{env, thread}; +use std::collections::HashMap; +use clarity::boot_util::boot_code_id; +use clarity::vm::{ClarityVersion, Value}; +use clarity::vm::types::PrincipalData; +use stacks::burnchains::{Burnchain, PoxConstants}; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::boot::RawRewardSetEntry; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; +use stacks_common::util::hash::{bytes_to_hex, Hash160, hex_bytes}; +use stacks_common::util::secp256k1::Secp256k1PublicKey; +use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::tests::neon_integrations::{get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop}; +use crate::tests::{make_contract_call, to_addr}; + +use stacks::core; +use stacks::core::{PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, StacksEpoch}; +use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::types::{Address, StacksEpochId}; +use crate::{BitcoinRegtestController, BurnchainController, neon}; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use stacks::clarity_cli::vm_execute as execute; +use stacks_common::address::AddressHashMode; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::util::sleep_ms; + +#[cfg(test)] +pub fn get_reward_set_entries_at_block( + state: &mut StacksChainState, + burnchain: &Burnchain, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + burn_block_height: u64, +) -> Result, Error> { + state + .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) + .and_then(|mut addrs| { + addrs.sort_by_key(|k| k.reward_address.bytes()); + Ok(addrs) + }) +} + +#[test] +#[ignore] +/// 
Verify the buggy stacks-increase behavior that was possible in PoX-2, and does not crash the +/// node in Epoch 2.4 +/// +/// Verify that transition to Epoch 2.4 occurs smoothly even if miners do not mine in the +/// same block as the PoX-3 activation height. +/// +/// Verify the PoX-3 payouts get made to the expected recipients. +fn fix_to_pox_contract() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. + let epoch_2_3 = 265; + let epoch_2_4 = 280; + let pox_3_activation_height = epoch_2_4; + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + increase_by + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + 
"0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = epoch_2_4; + epochs[6].start_height = epoch_2_4; + epochs[6].end_height = STACKS_EPOCH_MAX; + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + pox_3_activation_height as u32, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut 
btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + 
], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..20 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase again, in Epoch-2.2, it should + // runtime abort + let aborted_increase_nonce_2_2 = 2; + 
let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce_2_2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + + // transition to epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 + 1 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // invoke stack-increase again, in Epoch-2.3, it should + // runtime abort + let aborted_increase_nonce_2_3 = 3; + let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce_2_3, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // transition to 2 blocks before epoch 2.4 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_4 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_4); + + // *now* advance to 2.4 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.4"); + + // now, try stacking in pox-3 + let sort_height = channel.get_sortitions_processed(); + let tx = make_contract_call( + &spender_sk, + 4, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + 
Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.4 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 5, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-increase", + &[Value::UInt(increase_by.into())], + ); + + info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..19 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + 
info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let 
pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot + ( + 24, + HashMap::from([ + (pox_addr_2.clone(), 13u64), + (burn_pox_addr.clone(), 1), + ]), + ), + ( + 25, + HashMap::from([ + (pox_addr_2.clone(), 13u64), + (burn_pox_addr.clone(), 1), + ]), + ), + // Epoch 2.2 has started, so the reward set should be all burns. + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + // Epoch 2.3 has started, so the reward set should be all burns. 
+ (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + (28, HashMap::from([(burn_pox_addr.clone(), 14)])), + // cycle 29 is the first 2.4 cycle, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet) + ( + 29, + HashMap::from([ + (pox_addr_2.clone(), 6u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 2), + ]), + ), + // stack-increase has been invoked, but this should not skew reward set heavily + // because pox-3 fixes the total-locked bug + ( + 30, + HashMap::from([ + (pox_addr_2.clone(), 7u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 1), + ]), + ), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut abort_tested_2_2 = false; + let mut abort_tested_2_3 = false; + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || + 
parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "pox-2"); + assert_eq!(contract_call.function_name.as_str(), "stack-increase"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), "(err none)"); + if parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 { + abort_tested_2_2 = true; + } else if parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3 { + abort_tested_2_3 = true; + } else { + panic!("Unexpected nonce for the aborted stack-increase transaction.") + } + } + } + } + + assert!(abort_tested_2_2, "The stack-increase transaction must have been aborted in Epoch 2.2, \ + and it must have been tested in the tx receipts"); + assert!(abort_tested_2_3, "The stack-increase transaction must have been aborted in Epoch 2.3, \ + and it must have been tested in the tx receipts"); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + + +#[test] +#[ignore] +/// Verify that stackers that don't meet the stacking threshold get auto-unlocked in PoX-3. +fn verify_auto_unlock_behavior() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. 
+ let epoch_2_3 = 265; + let epoch_2_4 = 280; + let pox_3_activation_height = epoch_2_4; + + let first_stacked_init = 200_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let first_stacked_incr = 40_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let small_stacked = 17_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_stx_addr: StacksAddress = to_addr(&spender_sk); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: first_stacked_init + first_stacked_incr + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: small_stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks 
= 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = epoch_2_4; + epochs[6].start_height = epoch_2_4; + epochs[6].end_height = STACKS_EPOCH_MAX; + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + pox_3_activation_height as u32, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain_config + .block_height_to_reward_cycle(burnchain_config.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + 
Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + 
} + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..20 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + info!("Successfully transitioned to Epoch 2.2"); + + // transition to epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 + 1 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let pox_info = get_pox_info(&http_origin); + info!("curr height: {}, curr cycle id: {}, 
pox active: {}", + tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); + } + + info!("Successfully transitioned to Epoch 2.3"); + + // transition to 2 blocks before epoch 2.4 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_4 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let pox_info = get_pox_info(&http_origin); + info!("curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_4); + + // *now* advance to 2.4 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.4"); + + // now, try stacking in pox-3 + let sort_height = channel.get_sortitions_processed(); + let tx = make_contract_call( + &spender_sk, + 2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.4 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(small_stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine 
_at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // Check the locked balance of addr 1 + let account = get_account(&http_origin, &spender_stx_addr); + assert_eq!(account.locked, first_stacked_init); + + // Check the locked balance of addr 2 + let account = get_account(&http_origin, &spender_2_stx_addr); + assert_eq!(account.locked, small_stacked); + + // check that the "raw" reward sets for all cycles just contains entries for both addrs + // at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + let tip_info = get_chain_info(&conf); + let tip_block_id = StacksBlockId::new( + &tip_info.stacks_tip_consensus_hash, + &tip_info.stacks_tip + ); + + let reward_set_entries = get_reward_set_entries_at_block( + &mut chainstate, + &burnchain_config, + sortdb, + &tip_block_id, + tip_info.burn_block_height + ) + .unwrap(); + + assert_eq!(reward_set_entries.len(), 2); + info!("Reward set entries: pre stacks increase: {:?}", reward_set_entries); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + spender_stx_addr.bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, 200_000_000_000_000_000); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + spender_2_stx_addr.bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[1].amount_stacked, 17_000_000_000_000_000); + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 3, + 3000, + 
&StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-increase", + &[Value::UInt(first_stacked_incr.into())], + ); + + info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..19 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + let pox_info = get_pox_info(&http_origin); + info!("curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); + } + + // Check that the locked balance of addr 1 has not changed + let account = get_account(&http_origin, &spender_stx_addr); + assert_eq!(account.locked, first_stacked_init + first_stacked_incr); + + // Check that addr 2 has no locked tokens at this height + let account = get_account(&http_origin, &spender_2_stx_addr); + assert_eq!(account.locked, 0); + + // check that the "raw" reward sets for all cycles just contains entries for the first + // address at the cycle start, since spender_addr_2 was auto-unlocked. 
+ for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + let tip_info = get_chain_info(&conf); + let tip_block_id = StacksBlockId::new( + &tip_info.stacks_tip_consensus_hash, + &tip_info.stacks_tip + ); + + let reward_set_entries = get_reward_set_entries_at_block( + &mut chainstate, + &burnchain_config, + sortdb, + &tip_block_id, + tip_info.burn_block_height + ) + .unwrap(); + + assert_eq!(reward_set_entries.len(), 1); + info!("Reward set entries: post stacks increase: {:?}", reward_set_entries); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + spender_stx_addr.bytes.0.to_vec() + ); + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? 
pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), 
(burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot + ( + 24, + HashMap::from([ + (pox_addr_2.clone(), 13u64), + (burn_pox_addr.clone(), 1), + ]), + ), + ( + 25, + HashMap::from([ + (pox_addr_2.clone(), 13u64), + (burn_pox_addr.clone(), 1), + ]), + ), + // Epoch 2.2 has started, so the reward set should be all burns. + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + // Epoch 2.3 has started, so the reward set should be all burns. + (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + (28, HashMap::from([(burn_pox_addr.clone(), 14)])), + // cycle 29 is the first 2.4 cycle, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet). + ( + 29, + HashMap::from([ + (pox_addr_2.clone(), 12u64), + (pox_addr_3.clone(), 1), + (burn_pox_addr.clone(), 1), + ]), + ), + // stack-increase has been invoked, which causes spender_addr_2 to be below the stacking + // minimum, and thus they have zero reward addresses in reward cycle 30. 
+ ( + 30, + HashMap::from([ + (pox_addr_2.clone(), 13u64), + (burn_pox_addr.clone(), 1), + ]), + ), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + test_observer::clear(); + channel.stop_chains_coordinator(); +} \ No newline at end of file diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 8eca8f21fc..39bddbc61a 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -47,6 +47,7 @@ mod epoch_23; mod integrations; mod mempool; pub mod neon_integrations; +mod epoch_24; // $ cat /tmp/out.clar pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) } { value: (string-ascii 32) }) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 4c0513fc81..202c55f8a2 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -594,7 +594,7 @@ pub fn submit_tx(http_origin: &str, tx: &Vec) -> String { ); return res; } else { - eprintln!("{}", res.text().unwrap()); + eprintln!("Submit tx error: {}", res.text().unwrap()); panic!(""); } } From 1bdc6a455cab87714e5a228533ee5c265d33001d Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Thu, 11 May 2023 19:21:48 -0400 Subject: [PATCH 135/158] fixed test, added to gh 
workflow --- .github/workflows/bitcoin-tests.yml | 2 + src/chainstate/stacks/boot/mod.rs | 11 +- src/chainstate/stacks/boot/pox-3.clar | 3 +- testnet/stacks-node/src/tests/epoch_22.rs | 54 ++--- testnet/stacks-node/src/tests/epoch_24.rs | 256 +++++++++++----------- testnet/stacks-node/src/tests/mod.rs | 2 +- 6 files changed, 170 insertions(+), 158 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 212ded7b76..a6fc20a3a2 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -103,6 +103,8 @@ jobs: - tests::epoch_22::test_pox_reorg_one_flap - tests::epoch_23::trait_invocation_behavior - tests::neon_integrations::bad_microblock_pubkey + - tests::epoch_24::fix_to_pox_contract + - tests::epoch_24::verify_auto_unlock_behavior steps: - uses: actions/checkout@v2 - name: Download docker image diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 351dbbc365..8c195a5213 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -44,8 +44,8 @@ use clarity::vm::costs::{ }; use clarity::vm::database::ClarityDatabase; use clarity::vm::database::{NULL_BURN_STATE_DB, NULL_HEADER_DB}; -use clarity::vm::errors::InterpreterError; use clarity::vm::errors::Error as VmError; +use clarity::vm::errors::InterpreterError; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::representations::ClarityName; use clarity::vm::representations::ContractName; @@ -690,7 +690,6 @@ impl StacksChainState { .iter() .fold(0, |agg, entry| agg + entry.amount_stacked); - info!("CALCULATING REWARD SET: checking total ustx"); assert!( participation <= liquid_ustx, "CORRUPTION: More stacking participation than liquid STX" @@ -1001,10 +1000,14 @@ impl StacksChainState { } }; + // Catch the epoch boundary edge case where burn height >= pox 3 activation height, but + // there hasn't yet been a Stacks block. 
match result { - Err(Error::ClarityError(ClarityError::Interpreter(VmError::Unchecked(CheckErrors::NoSuchContract(_))))) => { + Err(Error::ClarityError(ClarityError::Interpreter(VmError::Unchecked( + CheckErrors::NoSuchContract(_), + )))) => { warn!("Reward cycle attempted to calculate rewards before the PoX contract was instantiated"); - return Ok(vec![]) + return Ok(vec![]); } x => x, } diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index c2191fd1d2..d1b2d80651 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -19,7 +19,6 @@ (define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) (define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) (define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) - (define-constant ERR_INVALID_START_BURN_HEIGHT 24) (define-constant ERR_NOT_CURRENT_STACKER 25) (define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) @@ -1314,4 +1313,4 @@ (get amount rejected) u0 ) -) \ No newline at end of file +) diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index ab2122844b..2a42c7f083 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -105,7 +105,7 @@ fn disable_pox() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -115,7 +115,7 @@ fn disable_pox() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() @@ -125,7 +125,7 @@ fn disable_pox() { let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); 
let pox_pubkey_hash_3 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() @@ -231,15 +231,15 @@ fn disable_pox() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -287,8 +287,8 @@ fn disable_pox() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, 1, @@ -398,7 +398,7 @@ fn disable_pox() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); let mut reward_cycle_pox_addrs = HashMap::new(); @@ -566,7 +566,7 @@ fn disable_pox() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), "(err none)"); abort_tested = true; } @@ -635,7 +635,7 @@ fn pox_2_unlock_all() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -645,7 +645,7 @@ fn pox_2_unlock_all() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() @@ -655,7 +655,7 @@ fn pox_2_unlock_all() { let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_3 = bytes_to_hex( 
&Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() @@ -761,15 +761,15 @@ fn pox_2_unlock_all() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -817,8 +817,8 @@ fn pox_2_unlock_all() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_publish( &spender_sk, @@ -1062,7 +1062,7 @@ fn pox_2_unlock_all() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); let mut reward_cycle_pox_addrs = HashMap::new(); @@ -1218,7 +1218,7 @@ fn pox_2_unlock_all() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); unlock_ht_22_tested = true; } @@ -1234,7 +1234,7 @@ fn pox_2_unlock_all() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), format!("(ok u{})", 230 + 60)); unlock_ht_21_tested = true; } @@ -1498,7 +1498,7 @@ fn test_pox_reorg_one_flap() { let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey) .to_bytes() @@ -1525,8 +1525,8 @@ fn test_pox_reorg_one_flap() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), ClarityVersion::Clarity1, ) - .unwrap() - .unwrap(), + .unwrap() + .unwrap(), Value::UInt((sort_height + 1) as u128), 
Value::UInt(12), ], @@ -1677,4 +1677,4 @@ fn test_pox_reorg_one_flap() { let tip_info = get_chain_info(&c); info!("Final tip for miner {}: {:?}", i, &tip_info); } -} \ No newline at end of file +} diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 373df1ab82..228556fb44 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -13,33 +13,38 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{env, thread}; -use std::collections::HashMap; +use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use crate::tests::neon_integrations::{ + get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, + submit_tx, test_observer, wait_for_runloop, +}; +use crate::tests::{make_contract_call, to_addr}; use clarity::boot_util::boot_code_id; -use clarity::vm::{ClarityVersion, Value}; use clarity::vm::types::PrincipalData; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; -use stacks_common::util::hash::{bytes_to_hex, Hash160, hex_bytes}; +use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; -use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use crate::tests::neon_integrations::{get_account, get_chain_info, get_pox_info, neon_integration_test_conf, 
next_block_and_wait, submit_tx, test_observer, wait_for_runloop}; -use crate::tests::{make_contract_call, to_addr}; +use std::collections::HashMap; +use std::{env, thread}; -use stacks::core; -use stacks::core::{PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, StacksEpoch}; -use stacks_common::consts::STACKS_EPOCH_MAX; -use stacks_common::types::{Address, StacksEpochId}; -use crate::{BitcoinRegtestController, BurnchainController, neon}; use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::{neon, BitcoinRegtestController, BurnchainController}; use stacks::clarity_cli::vm_execute as execute; -use stacks_common::address::AddressHashMode; +use stacks::core; +use stacks::core::{ + StacksEpoch, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, +}; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::codec::StacksMessageCodec; +use stacks_common::consts::STACKS_EPOCH_MAX; +use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::sleep_ms; #[cfg(test)] @@ -60,10 +65,10 @@ pub fn get_reward_set_entries_at_block( #[test] #[ignore] -/// Verify the buggy stacks-increase behavior that was possible in PoX-2, and does not crash the +/// Verify the buggy stacks-increase behavior that was possible in PoX-2 does not crash the /// node in Epoch 2.4 /// -/// Verify that transition to Epoch 2.4 occurs smoothly even if miners do not mine in the +/// Verify that the transition to Epoch 2.4 occurs smoothly even if miners do not mine in the /// same block as the PoX-3 activation height. /// /// Verify the PoX-3 payouts get made to the expected recipients. 
@@ -106,7 +111,7 @@ fn fix_to_pox_contract() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -116,7 +121,7 @@ fn fix_to_pox_contract() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() @@ -126,7 +131,7 @@ fn fix_to_pox_contract() { let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_3 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() @@ -235,15 +240,15 @@ fn fix_to_pox_contract() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -292,8 +297,8 @@ fn fix_to_pox_contract() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, 1, @@ -313,7 +318,6 @@ fn fix_to_pox_contract() { sleep_ms(5_000); submit_tx(&http_origin, &tx); - // that it can mine _at all_ is a success criterion let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..20 { @@ -342,7 +346,6 @@ fn fix_to_pox_contract() { info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); submit_tx(&http_origin, &tx); - // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); @@ -475,7 +478,7 @@ fn 
fix_to_pox_contract() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); let mut reward_cycle_pox_addrs = HashMap::new(); @@ -581,17 +584,11 @@ fn fix_to_pox_contract() { // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot ( 24, - HashMap::from([ - (pox_addr_2.clone(), 13u64), - (burn_pox_addr.clone(), 1), - ]), + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), ), ( 25, - HashMap::from([ - (pox_addr_2.clone(), 13u64), - (burn_pox_addr.clone(), 1), - ]), + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), ), // Epoch 2.2 has started, so the reward set should be all burns. (26, HashMap::from([(burn_pox_addr.clone(), 14)])), @@ -650,8 +647,8 @@ fn fix_to_pox_contract() { StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); if &tx_sender == &spender_addr - && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || - parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) + && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 + || parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, @@ -662,7 +659,7 @@ fn fix_to_pox_contract() { let result = Value::try_deserialize_hex_untyped( tx.get("raw_result").unwrap().as_str().unwrap(), ) - .unwrap(); + .unwrap(); assert_eq!(result.to_string(), "(err none)"); if parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 { abort_tested_2_2 = true; @@ -675,16 +672,21 @@ fn fix_to_pox_contract() { } } - assert!(abort_tested_2_2, "The stack-increase transaction must have been aborted in Epoch 2.2, \ - and it must have been tested in the tx receipts"); - assert!(abort_tested_2_3, "The stack-increase transaction must have been aborted in Epoch 2.3, \ - and it must have been tested 
in the tx receipts"); + assert!( + abort_tested_2_2, + "The stack-increase transaction must have been aborted in Epoch 2.2, \ + and it must have been tested in the tx receipts" + ); + assert!( + abort_tested_2_3, + "The stack-increase transaction must have been aborted in Epoch 2.3, \ + and it must have been tested in the tx receipts" + ); test_observer::clear(); channel.stop_chains_coordinator(); } - #[test] #[ignore] /// Verify that stackers that don't meet the stacking threshold get auto-unlocked in PoX-3. @@ -730,7 +732,7 @@ fn verify_auto_unlock_behavior() { let pox_pubkey_1 = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_1 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_1) .to_bytes() @@ -740,22 +742,36 @@ fn verify_auto_unlock_behavior() { let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_2 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_2) .to_bytes() .to_vec(), ); + let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![pox_pubkey_2], + ) + .unwrap(); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) - .unwrap(); + .unwrap(); let pox_pubkey_hash_3 = bytes_to_hex( &Hash160::from_node_public_key(&pox_pubkey_3) .to_bytes() .to_vec(), ); + let pox_pubkey_3_stx_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![pox_pubkey_3], + ) + .unwrap(); let (mut conf, _) = neon_integration_test_conf(); @@ -864,15 +880,15 @@ fn verify_auto_unlock_behavior() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + 
.unwrap(); let pox_addr_tuple_3 = execute( &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, @@ -921,8 +937,8 @@ fn verify_auto_unlock_behavior() { &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), ClarityVersion::Clarity2, ) - .unwrap() - .unwrap(); + .unwrap() + .unwrap(); let tx = make_contract_call( &spender_sk, 1, @@ -942,7 +958,6 @@ fn verify_auto_unlock_behavior() { sleep_ms(5_000); submit_tx(&http_origin, &tx); - // that it can mine _at all_ is a success criterion let mut last_block_height = get_chain_info(&conf).burn_block_height; for _i in 0..20 { @@ -953,7 +968,7 @@ fn verify_auto_unlock_behavior() { } else { panic!("FATAL: failed to mine"); } - } + } info!("Successfully transitioned to Epoch 2.2"); @@ -966,8 +981,12 @@ fn verify_auto_unlock_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let pox_info = get_pox_info(&http_origin); - info!("curr height: {}, curr cycle id: {}, pox active: {}", - tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); + info!( + "curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, + pox_info.current_cycle.id, + pox_info.current_cycle.is_pox_active + ); } info!("Successfully transitioned to Epoch 2.3"); @@ -981,8 +1000,12 @@ fn verify_auto_unlock_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let pox_info = get_pox_info(&http_origin); - info!("curr height: {}, curr cycle id: {}, pox active: {}", - tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); + info!( + "curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, + pox_info.current_cycle.id, + pox_info.current_cycle.is_pox_active + ); } // skip a couple sortitions @@ -1050,16 +1073,16 @@ fn 
verify_auto_unlock_behavior() { } } - // Check the locked balance of addr 1 + // Check the locked balance of addr 1. let account = get_account(&http_origin, &spender_stx_addr); - assert_eq!(account.locked, first_stacked_init); + assert_eq!(account.locked, first_stacked_init as u128); - // Check the locked balance of addr 2 + // Check the locked balance of addr 2. let account = get_account(&http_origin, &spender_2_stx_addr); - assert_eq!(account.locked, small_stacked); + assert_eq!(account.locked, small_stacked as u128); - // check that the "raw" reward sets for all cycles just contains entries for both addrs - // at the cycle start + // Check that the "raw" reward sets for all cycles just contains entries for both addrs + // for the next few cycles. for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { let (mut chainstate, _) = StacksChainState::open( false, @@ -1067,35 +1090,37 @@ fn verify_auto_unlock_behavior() { &conf.get_chainstate_path_str(), None, ) - .unwrap(); + .unwrap(); let sortdb = btc_regtest_controller.sortdb_mut(); + let tip_info = get_chain_info(&conf); - let tip_block_id = StacksBlockId::new( - &tip_info.stacks_tip_consensus_hash, - &tip_info.stacks_tip - ); + let tip_block_id = + StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); let reward_set_entries = get_reward_set_entries_at_block( &mut chainstate, &burnchain_config, sortdb, &tip_block_id, - tip_info.burn_block_height + tip_info.burn_block_height, ) - .unwrap(); + .unwrap(); assert_eq!(reward_set_entries.len(), 2); - info!("Reward set entries: pre stacks increase: {:?}", reward_set_entries); + info!("reward set entries: {:?}", reward_set_entries); assert_eq!( reward_set_entries[0].reward_address.bytes(), - spender_stx_addr.bytes.0.to_vec() + pox_pubkey_2_stx_addr.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_stacked_init as u128 ); - assert_eq!(reward_set_entries[0].amount_stacked, 200_000_000_000_000_000); assert_eq!( 
reward_set_entries[1].reward_address.bytes(), - spender_2_stx_addr.bytes.0.to_vec() + pox_pubkey_3_stx_addr.bytes.0.to_vec() ); - assert_eq!(reward_set_entries[0].amount_stacked, 17_000_000_000_000_000); + assert_eq!(reward_set_entries[1].amount_stacked, small_stacked as u128); } // invoke stack-increase @@ -1120,65 +1145,58 @@ fn verify_auto_unlock_behavior() { } else { panic!("FATAL: failed to mine"); } - let pox_info = get_pox_info(&http_origin); - info!("curr height: {}, curr cycle id: {}, pox active: {}", - tip_info.burn_block_height, pox_info.current_cycle.id, pox_info.current_cycle.is_pox_active); } - // Check that the locked balance of addr 1 has not changed + // Check that the locked balance of addr 1 has not changed. let account = get_account(&http_origin, &spender_stx_addr); - assert_eq!(account.locked, first_stacked_init + first_stacked_incr); + assert_eq!( + account.locked, + (first_stacked_init + first_stacked_incr) as u128 + ); - // Check that addr 2 has no locked tokens at this height + // Check that addr 2 has no locked tokens at this height (was auto-unlocked). let account = get_account(&http_origin, &spender_2_stx_addr); assert_eq!(account.locked, 0); - // check that the "raw" reward sets for all cycles just contains entries for the first - // address at the cycle start, since spender_addr_2 was auto-unlocked. + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + // Check that the "raw" reward sets for all cycles just contains entries for the first + // address at the cycle start, since addr 2 was auto-unlocked. 
for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { - let (mut chainstate, _) = StacksChainState::open( - false, - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - let sortdb = btc_regtest_controller.sortdb_mut(); let tip_info = get_chain_info(&conf); - let tip_block_id = StacksBlockId::new( - &tip_info.stacks_tip_consensus_hash, - &tip_info.stacks_tip - ); + let tip_block_id = + StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); let reward_set_entries = get_reward_set_entries_at_block( &mut chainstate, &burnchain_config, sortdb, &tip_block_id, - tip_info.burn_block_height + tip_info.burn_block_height, ) - .unwrap(); + .unwrap(); assert_eq!(reward_set_entries.len(), 1); - info!("Reward set entries: post stacks increase: {:?}", reward_set_entries); assert_eq!( reward_set_entries[0].reward_address.bytes(), - spender_stx_addr.bytes.0.to_vec() + pox_pubkey_2_stx_addr.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + (first_stacked_init + first_stacked_incr) as u128 ); } let tip_info = get_chain_info(&conf); let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); - let (mut chainstate, _) = StacksChainState::open( - false, - conf.burnchain.chain_id, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - let sortdb = btc_regtest_controller.sortdb_mut(); - let mut reward_cycle_pox_addrs = HashMap::new(); info!("Last tip height = {}", tip_info.burn_block_height); @@ -1207,7 +1225,6 @@ fn verify_auto_unlock_behavior() { .unwrap() .expect_list(); - debug!("Test burnchain height {}", height); if !burnchain_config.is_in_prepare_phase(height) { if pox_addrs.len() > 0 { assert_eq!(pox_addrs.len(), 2); @@ -1282,17 +1299,11 @@ fn verify_auto_unlock_behavior() { // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot ( 24, - HashMap::from([ - (pox_addr_2.clone(), 13u64), - (burn_pox_addr.clone(), 1), - ]), + 
HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), ), ( 25, - HashMap::from([ - (pox_addr_2.clone(), 13u64), - (burn_pox_addr.clone(), 1), - ]), + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), ), // Epoch 2.2 has started, so the reward set should be all burns. (26, HashMap::from([(burn_pox_addr.clone(), 14)])), @@ -1313,10 +1324,7 @@ fn verify_auto_unlock_behavior() { // minimum, and thus they have zero reward addresses in reward cycle 30. ( 30, - HashMap::from([ - (pox_addr_2.clone(), 13u64), - (burn_pox_addr.clone(), 1), - ]), + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), ), ]); @@ -1337,4 +1345,4 @@ fn verify_auto_unlock_behavior() { test_observer::clear(); channel.stop_chains_coordinator(); -} \ No newline at end of file +} diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 39bddbc61a..94d6401c52 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -44,10 +44,10 @@ mod epoch_205; mod epoch_21; mod epoch_22; mod epoch_23; +mod epoch_24; mod integrations; mod mempool; pub mod neon_integrations; -mod epoch_24; // $ cat /tmp/out.clar pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) } { value: (string-ascii 32) }) From c58ba79b7465bef8708c9aab219b36bf46fa48c4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 12 May 2023 12:05:52 -0400 Subject: [PATCH 136/158] chore: update testnet 2.4 activation This puts it before the next prepare phase. 
--- src/core/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index d1b9192b53..ccef9f56a7 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -129,7 +129,7 @@ pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; -pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_433_033; +pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; From acc2279a392657b6018c0ff6e0005778d0b0da48 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 12 May 2023 16:30:23 -0500 Subject: [PATCH 137/158] feat: implement proposed SIP-024 sanitization logic for epoch-2.4 --- clarity/src/vm/contexts.rs | 11 +- clarity/src/vm/costs/mod.rs | 40 +- clarity/src/vm/database/clarity_db.rs | 153 ++- clarity/src/vm/database/key_value_wrapper.rs | 44 +- clarity/src/vm/events.rs | 8 +- clarity/src/vm/functions/assets.rs | 12 + clarity/src/vm/functions/conversions.rs | 10 +- clarity/src/vm/functions/database.rs | 45 +- clarity/src/vm/functions/sequences.rs | 28 +- clarity/src/vm/mod.rs | 13 +- clarity/src/vm/tests/contracts.rs | 24 +- clarity/src/vm/tests/mod.rs | 7 + clarity/src/vm/types/mod.rs | 49 +- clarity/src/vm/types/serialization.rs | 1188 +++++++++++++---- clarity/src/vm/types/signatures.rs | 1 + src/chainstate/stacks/boot/pox_2_tests.rs | 10 +- src/chainstate/stacks/db/blocks.rs | 2 + src/chainstate/stacks/db/contracts.rs | 3 +- src/chainstate/stacks/db/mod.rs | 15 +- src/clarity_vm/tests/costs.rs | 6 + src/main.rs | 53 + src/net/rpc.rs | 29 +- stacks-common/src/types/mod.rs | 6 + testnet/stacks-node/src/tests/epoch_21.rs | 18 +- testnet/stacks-node/src/tests/integrations.rs | 15 +- 
.../src/tests/neon_integrations.rs | 12 +- 26 files changed, 1361 insertions(+), 441 deletions(-) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 6ad92e6c72..f577690ae3 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -1162,7 +1162,16 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let value = arg.match_atom_value() .ok_or_else(|| InterpreterError::InterpreterError(format!("Passed non-value expression to exec_tx on {}!", tx_name)))?; - Ok(value.clone()) + // sanitize contract-call inputs in epochs >= 2.4 + // testing todo: ensure sanitize_value() preserves trait callability! + let expected_type = TypeSignature::type_of(value); + let (sanitized_value, _) = Value::sanitize_value( + self.epoch(), + &expected_type, + value.clone(), + ).ok_or_else(|| CheckErrors::TypeValueError(expected_type, value.clone()))?; + + Ok(sanitized_value) }) .collect(); diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 7b5b6467de..ee9a1a6eb2 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -325,9 +325,11 @@ pub enum CostErrors { fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result { let cost_voting_contract = boot_code_id("cost-voting", mainnet); + let clarity_epoch = clarity_db.get_clarity_epoch_version(); let last_processed_at = match clarity_db.get_value( "vm-costs::last-processed-at-height", &TypeSignature::UIntType, + &clarity_epoch, ) { Ok(Some(v)) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), Ok(None) => return Ok(CostStateSummary::empty()), @@ -355,11 +357,14 @@ fn store_state_summary( ) -> Result<()> { let block_height = clarity_db.get_current_block_height(); let cost_voting_contract = boot_code_id("cost-voting", mainnet); - - clarity_db.put( - "vm-costs::last-processed-at-height", - &Value::UInt(block_height as u128), - ); + let epoch = clarity_db.get_clarity_epoch_version(); + clarity_db + .put_value( + 
"vm-costs::last-processed-at-height", + Value::UInt(block_height as u128), + &epoch, + ) + .map_err(|_e| CostErrors::CostContractLoadFailure)?; let serialized_summary = serde_json::to_string(&SerializedCostStateSummary::from(to_store.clone())) .expect("BUG: failure to serialize cost state summary struct"); @@ -387,15 +392,24 @@ fn load_cost_functions( clarity_db: &mut ClarityDatabase, apply_updates: bool, ) -> Result { + let clarity_epoch = clarity_db.get_clarity_epoch_version(); let last_processed_count = clarity_db - .get_value("vm-costs::last_processed_count", &TypeSignature::UIntType) + .get_value( + "vm-costs::last_processed_count", + &TypeSignature::UIntType, + &clarity_epoch, + ) .map_err(|_e| CostErrors::CostContractLoadFailure)? .map(|result| result.value) .unwrap_or(Value::UInt(0)) .expect_u128(); let cost_voting_contract = boot_code_id("cost-voting", mainnet); let confirmed_proposals_count = clarity_db - .lookup_variable_unknown_descriptor(&cost_voting_contract, "confirmed-proposal-count") + .lookup_variable_unknown_descriptor( + &cost_voting_contract, + "confirmed-proposal-count", + &clarity_epoch, + ) .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? 
.expect_u128(); debug!("Check cost voting contract"; @@ -422,6 +436,7 @@ fn load_cost_functions( )]) .expect("BUG: failed to construct simple tuple"), ), + &clarity_epoch, ) .expect("BUG: Failed querying confirmed-proposals") .expect_optional() @@ -615,10 +630,13 @@ fn load_cost_functions( } if confirmed_proposals_count > last_processed_count { store_state_summary(mainnet, clarity_db, &state_summary)?; - clarity_db.put( - "vm-costs::last_processed_count", - &Value::UInt(confirmed_proposals_count), - ); + clarity_db + .put_value( + "vm-costs::last_processed_count", + Value::UInt(confirmed_proposals_count), + &clarity_epoch, + ) + .map_err(|_e| CostErrors::CostContractLoadFailure)?; } Ok(state_summary) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 8c4a61db74..1218da1a13 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -482,13 +482,49 @@ impl<'a> ClarityDatabase<'a> { self.store.get::(key) } + pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { + self.put_value_with_size(key, value, epoch)?; + Ok(()) + } + + pub fn put_value_with_size( + &mut self, + key: &str, + value: Value, + epoch: &StacksEpochId, + ) -> Result { + let sanitize = epoch.value_sanitizing(); + let mut pre_sanitized_size = None; + + let serialized = if sanitize { + let value_size = value.serialized_size() as u64; + let (sanitized_value, did_sanitize) = + Value::sanitize_value(epoch, &TypeSignature::type_of(&value), value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + // if data needed to be sanitized *charge* for the unsanitized cost + if did_sanitize { + pre_sanitized_size = Some(value_size); + } + sanitized_value.serialize_to_vec() + } else { + value.serialize_to_vec() + }; + + let size = serialized.len() as u64; + let hex_serialized = to_hex(serialized.as_slice()); + self.store.put(&key, &hex_serialized); + + Ok(pre_sanitized_size.unwrap_or(size)) + 
} + pub fn get_value( &mut self, key: &str, expected: &TypeSignature, + epoch: &StacksEpochId, ) -> Result> { self.store - .get_value(key, expected) + .get_value(key, expected, epoch) .map_err(|e| InterpreterError::DBError(e.to_string()).into()) } @@ -711,6 +747,7 @@ impl<'a> ClarityDatabase<'a> { self.get_value( ClarityDatabase::ustx_liquid_supply_key(), &TypeSignature::UIntType, + &StacksEpochId::latest(), ) .expect("FATAL: failed to load ustx_liquid_supply Clarity key") .map(|v| v.value.expect_u128()) @@ -718,10 +755,13 @@ impl<'a> ClarityDatabase<'a> { } fn set_ustx_liquid_supply(&mut self, set_to: u128) { - self.put( + self.put_value( ClarityDatabase::ustx_liquid_supply_key(), - &Value::UInt(set_to), + Value::UInt(set_to), + // okay to pin epoch, because ustx_liquid_supply does not need to sanitize + &StacksEpochId::Epoch21, ) + .expect("FATAL: Failed to store STX liquid supply"); } pub fn increment_ustx_liquid_supply(&mut self, incr_by: u128) -> Result<()> { @@ -1119,8 +1159,14 @@ impl<'a> ClarityDatabase<'a> { value: Value, ) -> Result { let descriptor = self.load_variable(contract_identifier, variable_name)?; - self.set_variable(contract_identifier, variable_name, value, &descriptor) - .map(|data| data.value) + self.set_variable( + contract_identifier, + variable_name, + value, + &descriptor, + &StacksEpochId::latest(), + ) + .map(|data| data.value) } pub fn set_variable( @@ -1129,6 +1175,7 @@ impl<'a> ClarityDatabase<'a> { variable_name: &str, value: Value, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { if !variable_descriptor .value_type @@ -1145,7 +1192,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let size = self.put_with_size(&key, &value); + let size = self.put_value_with_size(&key, value, epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1157,9 +1204,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, variable_name: &str, + epoch: &StacksEpochId, ) -> 
Result { let descriptor = self.load_variable(contract_identifier, variable_name)?; - self.lookup_variable(contract_identifier, variable_name, &descriptor) + self.lookup_variable(contract_identifier, variable_name, &descriptor, epoch) } pub fn lookup_variable( @@ -1167,6 +1215,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, variable_name: &str, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { let key = ClarityDatabase::make_key_for_trip( contract_identifier, @@ -1174,7 +1223,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type)?; + let result = self.get_value(&key, &variable_descriptor.value_type, epoch)?; match result { None => Ok(Value::none()), @@ -1189,6 +1238,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, variable_name: &str, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { let key = ClarityDatabase::make_key_for_trip( contract_identifier, @@ -1196,7 +1246,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type)?; + let result = self.get_value(&key, &variable_descriptor.value_type, epoch)?; match result { None => Ok(ValueResult { @@ -1247,7 +1297,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, - &key_value.serialize(), + &key_value.serialize_to_hex(), ) } @@ -1269,9 +1319,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, map_name: &str, key_value: &Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.fetch_entry(contract_identifier, map_name, key_value, &descriptor) + self.fetch_entry(contract_identifier, map_name, key_value, &descriptor, epoch) } /// Returns a Clarity optional type wrapping a found or not 
found result @@ -1281,6 +1332,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1297,7 +1349,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::make_key_for_data_map_entry(contract_identifier, map_name, key_value); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type)?; + let result = self.get_value(&key, &stored_type, epoch)?; match result { None => Ok(Value::none()), @@ -1311,6 +1363,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1323,7 +1376,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key = ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, @@ -1331,7 +1384,7 @@ impl<'a> ClarityDatabase<'a> { ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type)?; + let result = self.get_value(&key, &stored_type, epoch)?; match result { None => Ok(ValueResult { @@ -1357,6 +1410,7 @@ impl<'a> ClarityDatabase<'a> { key: Value, value: Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { self.inner_set_entry( contract_identifier, @@ -1365,6 +1419,7 @@ impl<'a> ClarityDatabase<'a> { value, false, map_descriptor, + epoch, ) } @@ -1374,10 +1429,18 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key: Value, value: Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.set_entry(contract_identifier, map_name, key, value, &descriptor) - .map(|data| data.value) + self.set_entry( + contract_identifier, + map_name, + key, + value, + &descriptor, + 
epoch, + ) + .map(|data| data.value) } pub fn insert_entry_unknown_descriptor( @@ -1386,10 +1449,18 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key: Value, value: Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.insert_entry(contract_identifier, map_name, key, value, &descriptor) - .map(|data| data.value) + self.insert_entry( + contract_identifier, + map_name, + key, + value, + &descriptor, + epoch, + ) + .map(|data| data.value) } pub fn insert_entry( @@ -1399,6 +1470,7 @@ impl<'a> ClarityDatabase<'a> { key: Value, value: Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { self.inner_set_entry( contract_identifier, @@ -1407,11 +1479,17 @@ impl<'a> ClarityDatabase<'a> { value, true, map_descriptor, + epoch, ) } - fn data_map_entry_exists(&mut self, key: &str, expected_value: &TypeSignature) -> Result { - match self.get_value(key, expected_value)? { + fn data_map_entry_exists( + &mut self, + key: &str, + expected_value: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result { + match self.get_value(key, expected_value, epoch)? { None => Ok(false), Some(value) => Ok(value.value != Value::none()), } @@ -1425,6 +1503,7 @@ impl<'a> ClarityDatabase<'a> { value: Value, return_if_exists: bool, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1443,7 +1522,7 @@ impl<'a> ClarityDatabase<'a> { ); } - let key_serialized = key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1453,7 +1532,7 @@ impl<'a> ClarityDatabase<'a> { ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - if return_if_exists && self.data_map_entry_exists(&key, &stored_type)? { + if return_if_exists && self.data_map_entry_exists(&key, &stored_type, epoch)? 
{ return Ok(ValueResult { value: Value::Bool(false), serialized_byte_len: key_serialized_byte_len, @@ -1461,7 +1540,7 @@ impl<'a> ClarityDatabase<'a> { } let placed_value = Value::some(value)?; - let placed_size = self.put_with_size(&key, &placed_value); + let placed_size = self.put_value_with_size(&key, placed_value, epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1477,6 +1556,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1489,7 +1569,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1498,14 +1578,14 @@ impl<'a> ClarityDatabase<'a> { &key_serialized, ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - if !self.data_map_entry_exists(&key, &stored_type)? { + if !self.data_map_entry_exists(&key, &stored_type, epoch)? 
{ return Ok(ValueResult { value: Value::Bool(false), serialized_byte_len: key_serialized_byte_len, }); } - self.put(&key, &(Value::none())); + self.put_value(&key, Value::none(), epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1709,12 +1789,17 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); - let value: Option = self.get(&key); + let epoch = self.get_clarity_epoch_version(); + let value: Option = self.get_value( + &key, + &TypeSignature::new_option(TypeSignature::PrincipalType).unwrap(), + &epoch, + )?; let owner = match value { - Some(owner) => owner.expect_optional(), + Some(owner) => owner.value.expect_optional(), None => return Err(RuntimeErrorType::NoSuchToken.into()), }; @@ -1742,6 +1827,7 @@ impl<'a> ClarityDatabase<'a> { asset: &Value, principal: &PrincipalData, key_type: &TypeSignature, + epoch: &StacksEpochId, ) -> Result<()> { if !key_type.admits(&self.get_clarity_epoch_version(), asset)? { return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); @@ -1751,11 +1837,11 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); let value = Value::some(Value::Principal(principal.clone()))?; - self.put(&key, &value); + self.put_value(&key, value, epoch)?; Ok(()) } @@ -1766,6 +1852,7 @@ impl<'a> ClarityDatabase<'a> { asset_name: &str, asset: &Value, key_type: &TypeSignature, + epoch: &StacksEpochId, ) -> Result<()> { if !key_type.admits(&self.get_clarity_epoch_version(), asset)? 
{ return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); @@ -1775,10 +1862,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); - self.put(&key, &(Value::none())); + self.put_value(&key, Value::none(), epoch)?; Ok(()) } } diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 9c7aa33569..0b45c54089 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -18,10 +18,13 @@ use std::collections::HashMap; use std::{clone::Clone, cmp::Eq, hash::Hash}; use crate::vm::database::clarity_store::make_contract_hash_key; -use crate::vm::errors::InterpreterResult as Result; +use crate::vm::errors::InterpreterResult; use crate::vm::types::serialization::SerializationError; -use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; -use crate::vm::Value; +use crate::vm::types::{ + QualifiedContractIdentifier, SequenceData, SequenceSubtype, TupleData, TypeSignature, +}; +use crate::vm::{StacksEpoch, Value}; +use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; use crate::types::chainstate::StacksBlockId; @@ -102,6 +105,7 @@ where /// Result structure for fetched values from the /// underlying store. 
+#[derive(Debug)] pub struct ValueResult { pub value: Value, pub serialized_byte_len: u64, @@ -324,7 +328,7 @@ impl<'a> RollbackWrapper<'a> { &mut self, bhh: StacksBlockId, query_pending_data: bool, - ) -> Result { + ) -> InterpreterResult { self.store.set_block_hash(bhh).and_then(|x| { // use and_then so that query_pending_data is only set once set_block_hash succeeds // this doesn't matter in practice, because a set_block_hash failure always aborts @@ -366,31 +370,41 @@ impl<'a> RollbackWrapper<'a> { lookup_result.or_else(|| self.store.get(key).map(|x| T::deserialize(&x))) } + pub fn deserialize_value( + value_hex: &str, + expected: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result { + let serialized_byte_len = value_hex.len() as u64 / 2; + let sanitize = epoch.value_sanitizing(); + let value = Value::try_deserialize_hex(value_hex, expected, sanitize)?; + + Ok(ValueResult { + value, + serialized_byte_len, + }) + } + /// Get a Clarity value from the underlying Clarity KV store. /// Returns Some if found, with the Clarity Value and the serialized byte length of the value. 
pub fn get_value( &mut self, key: &str, expected: &TypeSignature, - ) -> std::result::Result, SerializationError> { + epoch: &StacksEpochId, + ) -> Result, SerializationError> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); if self.query_pending_data { if let Some(x) = self.lookup_map.get(key).and_then(|x| x.last()) { - return Ok(Some(ValueResult { - value: Value::try_deserialize_hex(x, expected)?, - serialized_byte_len: x.len() as u64 / 2, - })); + return Ok(Some(Self::deserialize_value(x, expected, epoch)?)); } } match self.store.get(key) { - Some(x) => Ok(Some(ValueResult { - value: Value::try_deserialize_hex(&x, expected)?, - serialized_byte_len: x.len() as u64 / 2, - })), + Some(x) => Ok(Some(Self::deserialize_value(&x, expected, epoch)?)), None => Ok(None), } } @@ -442,7 +456,7 @@ impl<'a> RollbackWrapper<'a> { &mut self, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { + ) -> InterpreterResult> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); @@ -471,7 +485,7 @@ impl<'a> RollbackWrapper<'a> { at_height: u32, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { + ) -> InterpreterResult> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); diff --git a/clarity/src/vm/events.rs b/clarity/src/vm/events.rs index 9c97820868..3973e60680 100644 --- a/clarity/src/vm/events.rs +++ b/clarity/src/vm/events.rs @@ -222,7 +222,7 @@ impl NFTTransferEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -247,7 +247,7 @@ impl NFTMintEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut 
bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -271,7 +271,7 @@ impl NFTBurnEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -347,7 +347,7 @@ impl SmartContractEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 6906543d2a..d5b21c59aa 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -407,12 +407,14 @@ pub fn special_mint_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; let asset_identifier = AssetIdentifier { @@ -470,12 +472,14 @@ pub fn special_mint_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; let asset_identifier = AssetIdentifier { @@ -545,12 +549,14 @@ pub fn special_transfer_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() 
as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -631,12 +637,14 @@ pub fn special_transfer_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -1018,11 +1026,13 @@ pub fn special_burn_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.burn_nft( &env.contract_context.contract_identifier, asset_name, &asset, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -1095,11 +1105,13 @@ pub fn special_burn_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.burn_nft( &env.contract_context.contract_identifier, asset_name, &asset, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 1ff203117c..622baf6e87 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . 
use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::StacksEpochId; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::runtime_cost; @@ -216,7 +217,12 @@ pub fn native_int_to_utf8(value: Value) -> Result { /// If the value cannot fit as serialized into the maximum buffer size, /// this returns `none`, otherwise, it will be `(some consensus-serialized-buffer)` pub fn to_consensus_buff(value: Value) -> Result { - let clar_buff_serialized = match Value::buff_from(value.serialize_to_vec()) { + let mut clar_buff_serialized = vec![]; + value + .serialize_write(&mut clar_buff_serialized) + .expect("FATAL: failed to serialize to vec"); + + let clar_buff_serialized = match Value::buff_from(clar_buff_serialized) { Ok(x) => x, Err(_) => return Ok(Value::none()), }; @@ -260,7 +266,7 @@ pub fn from_consensus_buff( // Perform the deserialization and check that it deserialized to the expected // type. A type mismatch at this point is an error that should be surfaced in // Clarity (as a none return). - let result = match Value::try_deserialize_bytes_exact(&input_bytes, &type_arg) { + let result = match Value::try_deserialize_bytes_exact(&input_bytes, &type_arg, false) { Ok(value) => value, Err(_) => return Ok(Value::none()), }; diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 54cc43ae5e..d4f6080c5a 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -204,6 +204,11 @@ pub fn special_contract_call( nested_env.execute_contract(&contract_identifier, function_name, &rest_args, false) }?; + // sanitize contract-call outputs in epochs >= 2.4 + let result_type = TypeSignature::type_of(&result); + let (result, _) = Value::sanitize_value(env.epoch(), &result_type, result) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + // Ensure that the expected type from the trait spec admits // the type of the value returned by the dynamic dispatch. 
if let Some(returns_type_signature) = type_returns_constraint { @@ -241,9 +246,10 @@ pub fn special_fetch_variable_v200( data_types.value_type.size(), )?; + let epoch = env.epoch().clone(); env.global_context .database - .lookup_variable(contract, var_name, data_types) + .lookup_variable(contract, var_name, data_types, &epoch) } /// The Stacks v205 version of fetch_variable uses the actual stored size of the @@ -265,10 +271,11 @@ pub fn special_fetch_variable_v205( .get(var_name) .ok_or(CheckErrors::NoSuchDataVariable(var_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .lookup_variable_with_size(contract, var_name, data_types); + .lookup_variable_with_size(contract, var_name, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -311,9 +318,10 @@ pub fn special_set_variable_v200( env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context .database - .set_variable(contract, var_name, value, data_types) + .set_variable(contract, var_name, value, data_types, &epoch) .map(|data| data.value) } @@ -342,10 +350,11 @@ pub fn special_set_variable_v205( .get(var_name) .ok_or(CheckErrors::NoSuchDataVariable(var_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .set_variable(contract, var_name, value, data_types); + .set_variable(contract, var_name, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -384,9 +393,10 @@ pub fn special_fetch_entry_v200( data_types.value_type.size() + data_types.key_type.size(), )?; + let epoch = env.epoch().clone(); env.global_context .database - .fetch_entry(contract, map_name, &key, data_types) + .fetch_entry(contract, map_name, &key, data_types, &epoch) } /// The Stacks v205 version of fetch_entry uses the actual stored size of the @@ -410,10 +420,11 @@ pub fn special_fetch_entry_v205( .get(map_name) 
.ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .fetch_entry_with_size(contract, map_name, &key, data_types); + .fetch_entry_with_size(contract, map_name, &key, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -486,9 +497,10 @@ pub fn special_set_entry_v200( env.add_memory(key.get_memory_use())?; env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context .database - .set_entry(contract, map_name, key, value, data_types) + .set_entry(contract, map_name, key, value, data_types, &epoch) .map(|data| data.value) } @@ -519,10 +531,11 @@ pub fn special_set_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .set_entry(contract, map_name, key, value, data_types); + .set_entry(contract, map_name, key, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -570,9 +583,11 @@ pub fn special_insert_entry_v200( env.add_memory(key.get_memory_use())?; env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); + env.global_context .database - .insert_entry(contract, map_name, key, value, data_types) + .insert_entry(contract, map_name, key, value, data_types, &epoch) .map(|data| data.value) } @@ -603,10 +618,11 @@ pub fn special_insert_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .insert_entry(contract, map_name, key, value, data_types); + .insert_entry(contract, map_name, key, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -651,9 +667,10 @@ pub fn special_delete_entry_v200( env.add_memory(key.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context 
.database - .delete_entry(contract, map_name, &key, data_types) + .delete_entry(contract, map_name, &key, data_types, &epoch) .map(|data| data.value) } @@ -682,10 +699,11 @@ pub fn special_delete_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .delete_entry(contract, map_name, &key, data_types); + .delete_entry(contract, map_name, &key, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -876,11 +894,12 @@ pub fn special_get_burn_block_info( TupleData::from_data(vec![ ( "addrs".into(), - Value::list_from( + Value::cons_list( addrs .into_iter() .map(|addr_tuple| Value::Tuple(addr_tuple)) .collect(), + env.epoch(), ) .expect("FATAL: could not convert address list to Value"), ), diff --git a/clarity/src/vm/functions/sequences.rs b/clarity/src/vm/functions/sequences.rs index 78337cf03d..801498f60c 100644 --- a/clarity/src/vm/functions/sequences.rs +++ b/clarity/src/vm/functions/sequences.rs @@ -45,7 +45,7 @@ pub fn list_cons( runtime_cost(ClarityCostFunction::ListCons, env, arg_size)?; - Value::list_from(args) + Value::cons_list(args, env.epoch()) } pub fn special_filter( @@ -167,7 +167,7 @@ pub fn special_map( mapped_results.push(res); } - Value::list_from(mapped_results) + Value::cons_list(mapped_results, env.epoch()) } pub fn special_append( @@ -194,11 +194,14 @@ pub fn special_append( )?; if entry_type.is_no_type() { assert_eq!(size, 0); - return Value::list_from(vec![element]); + return Value::cons_list(vec![element], env.epoch()); } if let Ok(next_entry_type) = TypeSignature::least_supertype(env.epoch(), &entry_type, &element_type) { + let (element, _) = Value::sanitize_value(env.epoch(), &next_entry_type, element) + .ok_or_else(|| CheckErrors::ListTypesMustMatch)?; + let next_type_signature = ListTypeData::new_list(next_entry_type, size + 1)?; data.push(element); 
Ok(Value::Sequence(SequenceData::List(ListData { @@ -223,7 +226,7 @@ pub fn special_concat_v200( check_argument_count(2, args)?; let mut wrapped_seq = eval(&args[0], env, context)?; - let mut other_wrapped_seq = eval(&args[1], env, context)?; + let other_wrapped_seq = eval(&args[1], env, context)?; runtime_cost( ClarityCostFunction::Concat, @@ -231,9 +234,9 @@ pub fn special_concat_v200( u64::from(wrapped_seq.size()).cost_overflow_add(u64::from(other_wrapped_seq.size()))?, )?; - match (&mut wrapped_seq, &mut other_wrapped_seq) { - (Value::Sequence(ref mut seq), Value::Sequence(ref mut other_seq)) => { - seq.append(env.epoch(), other_seq) + match (&mut wrapped_seq, other_wrapped_seq) { + (Value::Sequence(ref mut seq), Value::Sequence(other_seq)) => { + seq.concat(env.epoch(), other_seq) } _ => Err(RuntimeErrorType::BadTypeConstruction.into()), }?; @@ -249,17 +252,17 @@ pub fn special_concat_v205( check_argument_count(2, args)?; let mut wrapped_seq = eval(&args[0], env, context)?; - let mut other_wrapped_seq = eval(&args[1], env, context)?; + let other_wrapped_seq = eval(&args[1], env, context)?; - match (&mut wrapped_seq, &mut other_wrapped_seq) { - (Value::Sequence(ref mut seq), Value::Sequence(ref mut other_seq)) => { + match (&mut wrapped_seq, other_wrapped_seq) { + (Value::Sequence(ref mut seq), Value::Sequence(other_seq)) => { runtime_cost( ClarityCostFunction::Concat, env, (seq.len() as u64).cost_overflow_add(other_seq.len() as u64)?, )?; - seq.append(env.epoch(), other_seq) + seq.concat(env.epoch(), other_seq) } _ => { runtime_cost(ClarityCostFunction::Concat, env, 1)?; @@ -381,7 +384,8 @@ pub fn special_slice( env, (right_position - left_position) * seq.element_size(), )?; - let seq_value = seq.slice(left_position as usize, right_position as usize)?; + let seq_value = + seq.slice(env.epoch(), left_position as usize, right_position as usize)?; Value::some(seq_value) } _ => return Err(RuntimeErrorType::BadTypeConstruction.into()), diff --git 
a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 2358517508..db54d5b245 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -184,12 +184,15 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> env, context.depth(), )?; - if let Some(value) = context - .lookup_variable(name) - .or_else(|| env.contract_context.lookup_variable(name)) - { + if let Some(value) = context.lookup_variable(name) { runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?; Ok(value.clone()) + } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?; + let (value, _) = + Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value), value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + Ok(value) } else if let Some(callable_data) = context.lookup_callable_contract(name) { if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { Ok(callable_data.contract_identifier.clone().into()) @@ -411,7 +414,7 @@ pub fn eval_all( global_context.add_memory(value.size() as u64)?; let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type); - global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type)?; + global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type, &global_context.epoch_id)?; contract_context.meta_data_var.insert(name, data_type); }, diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index fd64f45308..688403dca8 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -230,7 +230,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![p1.clone(), p1.clone()]).unwrap() + Value::cons_list_unsanitized(vec![p1.clone(), p1.clone()]).unwrap() ); 
assert_eq!( env.execute_contract( @@ -240,7 +240,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![c_b.clone(), c_b.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), c_b.clone()]).unwrap() ); assert_eq!( env.execute_contract( @@ -250,7 +250,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![c_b.clone(), p1.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), p1.clone()]).unwrap() ); assert_eq!( env.execute_contract( @@ -260,7 +260,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![c_b.clone(), c_b.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), c_b.clone()]).unwrap() ); } } @@ -278,7 +278,7 @@ fn tx_sponsor_contract_asserts(env: &mut Environment, sponsor: Option) -> Result { + Value::cons_list_unsanitized(list_data) + } +} + pub fn with_versioned_memory_environment( f: F, epoch: StacksEpochId, diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e4fe61e05e..e51bbe9f45 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -241,6 +241,9 @@ pub enum Value { Optional(OptionalData), Response(ResponseData), CallableContract(CallableData), + // NOTE: any new value variants which may contain _other values_ (i.e., + // compound values like `Optional`, `Tuple`, `Response`, or `Sequence(List)`) + // must be handled in the value sanitization routine! 
} #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -463,12 +466,11 @@ impl SequenceData { Ok(()) } - pub fn append(&mut self, epoch: &StacksEpochId, other_seq: &mut SequenceData) -> Result<()> { + pub fn concat(&mut self, epoch: &StacksEpochId, other_seq: SequenceData) -> Result<()> { match (self, other_seq) { - ( - SequenceData::List(ref mut inner_data), - SequenceData::List(ref mut other_inner_data), - ) => inner_data.append(epoch, other_inner_data), + (SequenceData::List(ref mut inner_data), SequenceData::List(other_inner_data)) => { + inner_data.append(epoch, other_inner_data) + } ( SequenceData::Buffer(ref mut inner_data), SequenceData::Buffer(ref mut other_inner_data), @@ -486,7 +488,12 @@ impl SequenceData { Ok(()) } - pub fn slice(self, left_position: usize, right_position: usize) -> Result { + pub fn slice( + self, + epoch: &StacksEpochId, + left_position: usize, + right_position: usize, + ) -> Result { let empty_seq = left_position == right_position; let result = match self { @@ -504,7 +511,7 @@ impl SequenceData { } else { data.data[left_position..right_position].to_vec() }; - Value::list_from(data) + Value::cons_list(data, epoch) } SequenceData::String(CharType::ASCII(data)) => { let data = if empty_seq { @@ -910,7 +917,15 @@ impl Value { }))) } - pub fn list_from(list_data: Vec) -> Result { + pub fn cons_list_unsanitized(list_data: Vec) -> Result { + let type_sig = TypeSignature::construct_parent_list_type(&list_data)?; + Ok(Value::Sequence(SequenceData::List(ListData { + data: list_data, + type_signature: type_sig, + }))) + } + + pub fn cons_list(list_data: Vec, epoch: &StacksEpochId) -> Result { // Constructors for TypeSignature ensure that the size of the Value cannot // be greater than MAX_VALUE_SIZE (they error on such constructions) // Aaron: at this point, we've _already_ allocated memory for this type. @@ -918,6 +933,14 @@ impl Value { // this is a problem _if_ the static analyzer cannot already prevent // this case. 
This applies to all the constructor size checks. let type_sig = TypeSignature::construct_parent_list_type(&list_data)?; + let list_data_opt: Option<_> = list_data + .into_iter() + .map(|item| { + Value::sanitize_value(epoch, type_sig.get_list_item_type(), item) + .map(|(value, _did_sanitize)| value) + }) + .collect(); + let list_data = list_data_opt.ok_or_else(|| CheckErrors::ListTypesMustMatch)?; Ok(Value::Sequence(SequenceData::List(ListData { data: list_data, type_signature: type_sig, @@ -1188,13 +1211,18 @@ impl ListData { self.data.len().try_into().unwrap() } - fn append(&mut self, epoch: &StacksEpochId, other_seq: &mut ListData) -> Result<()> { + fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> { let entry_type_a = self.type_signature.get_list_item_type(); let entry_type_b = other_seq.type_signature.get_list_item_type(); let entry_type = TypeSignature::factor_out_no_type(epoch, &entry_type_a, &entry_type_b)?; let max_len = self.type_signature.get_max_len() + other_seq.type_signature.get_max_len(); + for item in other_seq.data.into_iter() { + let (item, _) = Value::sanitize_value(epoch, &entry_type, item) + .ok_or_else(|| CheckErrors::ListTypesMustMatch)?; + self.data.push(item); + } + self.type_signature = ListTypeData::new_list(entry_type, max_len)?; - self.data.append(&mut other_seq.data); Ok(()) } } @@ -1460,6 +1488,7 @@ impl TupleData { Ok(t) } + /// Return the number of fields in this tuple value pub fn len(&self) -> u64 { self.data_map.len() as u64 } diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 6e86b3f944..5a087cd9ff 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -34,7 +34,7 @@ use crate::vm::types::{ BufferLength, CallableData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, SequenceSubtype, StandardPrincipalData, StringSubtype, StringUTF8Length, TupleData, TypeSignature, 
Value, BOUND_VALUE_SERIALIZATION_BYTES, - MAX_VALUE_SIZE, + MAX_TYPE_DEPTH, MAX_VALUE_SIZE, }; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::retry::BoundReader; @@ -42,6 +42,8 @@ use stacks_common::util::retry::BoundReader; use crate::codec::{Error as codec_error, StacksMessageCodec}; use crate::vm::types::byte_len_of_serialization; +use super::{ListTypeData, TupleTypeSignature}; + /// Errors that may occur in serialization or deserialization /// If deserialization failed because the described type is a bad type and /// a CheckError is thrown, it gets wrapped in BadTypeError. @@ -61,6 +63,15 @@ lazy_static! { pub static ref NONE_SERIALIZATION_LEN: u64 = Value::none().serialize_to_vec().len() as u64; } +/// Deserialization uses a specific epoch for passing to the type signature checks +/// The reason this is pinned to Epoch21 is so that values stored before epoch-2.4 +/// can still be read from the database. +const DESERIALIZATION_TYPE_CHECK_EPOCH: StacksEpochId = StacksEpochId::Epoch21; + +/// Pre-sanitization values could end up being larger than the deserializer originally +/// supported, so we increase the bound to a higher level limit imposed by the cost checker. +const SANITIZATION_READ_BOUND: u64 = 15_000_000; + impl std::fmt::Display for SerializationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { @@ -295,6 +306,77 @@ macro_rules! 
check_match { }; } +enum DeserializeStackItem { + List { + items: Vec, + expected_len: u32, + expected_type: Option, + }, + Tuple { + items: Vec<(ClarityName, Value)>, + expected_len: u64, + processed_entries: u64, + expected_type: Option, + next_name: ClarityName, + next_sanitize: bool, + }, + OptionSome { + inner_expected_type: Option, + }, + ResponseOk { + inner_expected_type: Option, + }, + ResponseErr { + inner_expected_type: Option, + }, + TopLevel { + expected_type: Option, + }, +} + +impl DeserializeStackItem { + /// What is the expected type for the child of this deserialization stack item? + fn next_expected_type(&self) -> Result, SerializationError> { + match self { + DeserializeStackItem::List { expected_type, .. } => Ok(expected_type + .as_ref() + .map(|lt| lt.get_list_item_type()) + .cloned()), + DeserializeStackItem::Tuple { + expected_type, + next_name, + next_sanitize, + .. + } => match expected_type { + None => Ok(None), + Some(some_tuple) => { + // if we're sanitizing this tuple, and the `next_name` field is to be + // removed, don't return an expected type. + if *next_sanitize { + return Ok(None); + } + let field_type = some_tuple.field_type(&next_name).ok_or_else(|| { + SerializationError::DeserializeExpected(TypeSignature::TupleType( + some_tuple.clone(), + )) + })?; + Ok(Some(field_type.clone())) + } + }, + DeserializeStackItem::OptionSome { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::ResponseOk { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::ResponseErr { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::TopLevel { expected_type } => Ok(expected_type.clone()), + } + } +} + impl TypeSignature { /// Return the maximum length of the consensus serialization of a /// Clarity value of this type. 
The returned length *may* not fit @@ -427,18 +509,28 @@ impl Value { pub fn deserialize_read( r: &mut R, expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result { - Self::deserialize_read_count(r, expected_type).map(|(value, _)| value) + Self::deserialize_read_count(r, expected_type, sanitize).map(|(value, _)| value) } /// Deserialize just like `deserialize_read` but also - /// return the bytes read + /// return the bytes read. + /// If `sanitize` argument is set to true and `expected_type` is supplied, + /// this method will remove any extraneous tuple fields which may have been + /// allowed by `least_super_type`. pub fn deserialize_read_count( r: &mut R, expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result<(Value, u64), SerializationError> { - let mut bound_reader = BoundReader::from_reader(r, BOUND_VALUE_SERIALIZATION_BYTES as u64); - let value = Value::inner_deserialize_read(&mut bound_reader, expected_type, 0)?; + let bound_value_serialization_bytes = if sanitize && expected_type.is_some() { + SANITIZATION_READ_BOUND + } else { + BOUND_VALUE_SERIALIZATION_BYTES as u64 + }; + let mut bound_reader = BoundReader::from_reader(r, bound_value_serialization_bytes); + let value = Value::inner_deserialize_read(&mut bound_reader, expected_type, sanitize)?; let bytes_read = bound_reader.num_read(); if let Some(expected_type) = expected_type { let expect_size = match expected_type.max_serialized_size() { @@ -452,13 +544,15 @@ impl Value { } }; - assert!( - expect_size as u64 >= bytes_read, - "Deserialized more bytes than expected size during deserialization. Expected size = {}, bytes read = {}, type = {}", - expect_size, - bytes_read, - expected_type, - ); + if expect_size as u64 > bytes_read { + // this can happen due to sanitization, so its no longer indicative of a *problem* with the node. + debug!( + "Deserialized more bytes than expected size during deserialization. 
Expected size = {}, bytes read = {}, type = {}", + expect_size, + bytes_read, + expected_type, + ); + } } Ok((value, bytes_read)) @@ -466,261 +560,428 @@ impl Value { fn inner_deserialize_read( r: &mut R, - expected_type: Option<&TypeSignature>, - depth: u8, + top_expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result { use super::PrincipalData::*; use super::Value::*; - if depth >= 16 { - return Err(CheckErrors::TypeSignatureTooDeep.into()); - } - - let mut header = [0]; - r.read_exact(&mut header)?; - - let prefix = TypePrefix::from_u8(header[0]).ok_or_else(|| "Bad type prefix")?; + let mut stack = vec![DeserializeStackItem::TopLevel { + expected_type: top_expected_type.cloned(), + }]; - match prefix { - TypePrefix::Int => { - check_match!(expected_type, TypeSignature::IntType)?; - let mut buffer = [0; 16]; - r.read_exact(&mut buffer)?; - Ok(Int(i128::from_be_bytes(buffer))) - } - TypePrefix::UInt => { - check_match!(expected_type, TypeSignature::UIntType)?; - let mut buffer = [0; 16]; - r.read_exact(&mut buffer)?; - Ok(UInt(u128::from_be_bytes(buffer))) + while !stack.is_empty() { + if stack.len() > MAX_TYPE_DEPTH as usize { + return Err(CheckErrors::TypeSignatureTooDeep.into()); } - TypePrefix::Buffer => { - let mut buffer_len = [0; 4]; - r.read_exact(&mut buffer_len)?; - let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - - if let Some(x) = expected_type { - let passed_test = match x { - TypeSignature::SequenceType(SequenceSubtype::BufferType(expected_len)) => { - u32::from(&buffer_len) <= u32::from(expected_len) - } - _ => false, - }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); - } - } - - let mut data = vec![0; u32::from(buffer_len) as usize]; - - r.read_exact(&mut data[..])?; - - Value::buff_from(data).map_err(|_| "Bad buffer".into()) - } - TypePrefix::BoolTrue => { - check_match!(expected_type, TypeSignature::BoolType)?; - Ok(Bool(true)) - } - TypePrefix::BoolFalse => { - 
check_match!(expected_type, TypeSignature::BoolType)?; - Ok(Bool(false)) - } - TypePrefix::PrincipalStandard => { - check_match!(expected_type, TypeSignature::PrincipalType)?; - StandardPrincipalData::deserialize_read(r).map(Value::from) - } - TypePrefix::PrincipalContract => { - check_match!(expected_type, TypeSignature::PrincipalType)?; - let issuer = StandardPrincipalData::deserialize_read(r)?; - let name = ContractName::deserialize_read(r)?; - Ok(Value::from(QualifiedContractIdentifier { issuer, name })) - } - TypePrefix::ResponseOk | TypePrefix::ResponseErr => { - let committed = prefix == TypePrefix::ResponseOk; - - let expect_contained_type = match expected_type { - None => None, - Some(x) => { - let contained_type = match (committed, x) { - (true, TypeSignature::ResponseType(types)) => Ok(&types.0), - (false, TypeSignature::ResponseType(types)) => Ok(&types.1), - _ => Err(SerializationError::DeserializeExpected(x.clone())), - }?; - Some(contained_type) - } - }; - let data = Value::inner_deserialize_read(r, expect_contained_type, depth + 1)?; - let value = if committed { - Value::okay(data) - } else { - Value::error(data) + let expected_type = stack + .last() + .expect("FATAL: stack.last() should always be some() because of loop condition") + .next_expected_type()?; + + let mut header = [0]; + r.read_exact(&mut header)?; + let prefix = TypePrefix::from_u8(header[0]).ok_or_else(|| "Bad type prefix")?; + + let item = match prefix { + TypePrefix::Int => { + check_match!(expected_type, TypeSignature::IntType)?; + let mut buffer = [0; 16]; + r.read_exact(&mut buffer)?; + Ok(Int(i128::from_be_bytes(buffer))) } - .map_err(|_x| "Value too large")?; - - Ok(value) - } - TypePrefix::OptionalNone => { - check_match!(expected_type, TypeSignature::OptionalType(_))?; - Ok(Value::none()) - } - TypePrefix::OptionalSome => { - let expect_contained_type = match expected_type { - None => None, - Some(x) => { - let contained_type = match x { - 
TypeSignature::OptionalType(some_type) => Ok(some_type.as_ref()), - _ => Err(SerializationError::DeserializeExpected(x.clone())), - }?; - Some(contained_type) + TypePrefix::UInt => { + check_match!(expected_type, TypeSignature::UIntType)?; + let mut buffer = [0; 16]; + r.read_exact(&mut buffer)?; + Ok(UInt(u128::from_be_bytes(buffer))) + } + TypePrefix::Buffer => { + let mut buffer_len = [0; 4]; + r.read_exact(&mut buffer_len)?; + let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; + + if let Some(x) = &expected_type { + let passed_test = match x { + TypeSignature::SequenceType(SequenceSubtype::BufferType( + expected_len, + )) => u32::from(&buffer_len) <= u32::from(expected_len), + _ => false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } } - }; - let value = Value::some(Value::inner_deserialize_read( - r, - expect_contained_type, - depth + 1, - )?) - .map_err(|_x| "Value too large")?; + let mut data = vec![0; u32::from(buffer_len) as usize]; - Ok(value) - } - TypePrefix::List => { - let mut len = [0; 4]; - r.read_exact(&mut len)?; - let len = u32::from_be_bytes(len); + r.read_exact(&mut data[..])?; - if len > MAX_VALUE_SIZE { - return Err("Illegal list type".into()); + Value::buff_from(data).map_err(|_| "Bad buffer".into()) } - - let (list_type, entry_type) = match expected_type { - None => (None, None), - Some(TypeSignature::SequenceType(SequenceSubtype::ListType(list_type))) => { - if len > list_type.get_max_len() { - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap().clone(), - )); - } - (Some(list_type), Some(list_type.get_list_item_type())) - } - Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), - }; - - let mut items = Vec::with_capacity(len as usize); - for _i in 0..len { - items.push(Value::inner_deserialize_read(r, entry_type, depth + 1)?); + TypePrefix::BoolTrue => { + check_match!(expected_type, TypeSignature::BoolType)?; + 
Ok(Bool(true)) } - - if let Some(list_type) = list_type { - Value::list_with_type(&StacksEpochId::Epoch21, items, list_type.clone()) - .map_err(|_| "Illegal list type".into()) - } else { - Value::list_from(items).map_err(|_| "Illegal list type".into()) + TypePrefix::BoolFalse => { + check_match!(expected_type, TypeSignature::BoolType)?; + Ok(Bool(false)) } - } - TypePrefix::Tuple => { - let mut len = [0; 4]; - r.read_exact(&mut len)?; - let len = u32::from_be_bytes(len); - - if len > MAX_VALUE_SIZE { - return Err(SerializationError::DeserializationError( - "Illegal tuple type".to_string(), - )); + TypePrefix::PrincipalStandard => { + check_match!(expected_type, TypeSignature::PrincipalType)?; + StandardPrincipalData::deserialize_read(r).map(Value::from) } + TypePrefix::PrincipalContract => { + check_match!(expected_type, TypeSignature::PrincipalType)?; + let issuer = StandardPrincipalData::deserialize_read(r)?; + let name = ContractName::deserialize_read(r)?; + Ok(Value::from(QualifiedContractIdentifier { issuer, name })) + } + TypePrefix::ResponseOk | TypePrefix::ResponseErr => { + let committed = prefix == TypePrefix::ResponseOk; - let tuple_type = match expected_type { - None => None, - Some(TypeSignature::TupleType(tuple_type)) => { - if len as u64 != tuple_type.len() { - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap().clone(), - )); + let expect_contained_type = match &expected_type { + None => None, + Some(x) => { + let contained_type = match (committed, x) { + (true, TypeSignature::ResponseType(types)) => Ok(&types.0), + (false, TypeSignature::ResponseType(types)) => Ok(&types.1), + _ => Err(SerializationError::DeserializeExpected(x.clone())), + }?; + Some(contained_type) } - Some(tuple_type) - } - Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), - }; + }; - let mut items = Vec::with_capacity(len as usize); - for _i in 0..len { - let key = ClarityName::deserialize_read(r)?; + let stack_item = if 
committed { + DeserializeStackItem::ResponseOk { + inner_expected_type: expect_contained_type.cloned(), + } + } else { + DeserializeStackItem::ResponseErr { + inner_expected_type: expect_contained_type.cloned(), + } + }; - let expected_field_type = match tuple_type { + stack.push(stack_item); + continue; + } + TypePrefix::OptionalNone => { + check_match!(expected_type, TypeSignature::OptionalType(_))?; + Ok(Value::none()) + } + TypePrefix::OptionalSome => { + let expect_contained_type = match &expected_type { None => None, - Some(some_tuple) => Some(some_tuple.field_type(&key).ok_or_else(|| { - SerializationError::DeserializeExpected(expected_type.unwrap().clone()) - })?), + Some(x) => { + let contained_type = match x { + TypeSignature::OptionalType(some_type) => Ok(some_type.as_ref()), + _ => Err(SerializationError::DeserializeExpected(x.clone())), + }?; + Some(contained_type) + } }; - let value = Value::inner_deserialize_read(r, expected_field_type, depth + 1)?; - items.push((key, value)) - } + let stack_item = DeserializeStackItem::OptionSome { + inner_expected_type: expect_contained_type.cloned(), + }; - if let Some(tuple_type) = tuple_type { - TupleData::from_data_typed(&StacksEpochId::latest(), items, tuple_type) - .map_err(|_| "Illegal tuple type".into()) - .map(Value::from) - } else { - TupleData::from_data(items) - .map_err(|_| "Illegal tuple type".into()) - .map(Value::from) + stack.push(stack_item); + continue; } - } - TypePrefix::StringASCII => { - let mut buffer_len = [0; 4]; - r.read_exact(&mut buffer_len)?; - let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - - if let Some(x) = expected_type { - let passed_test = match x { - TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(expected_len), - )) => u32::from(&buffer_len) <= u32::from(expected_len), - _ => false, + TypePrefix::List => { + let mut len = [0; 4]; + r.read_exact(&mut len)?; + let len = u32::from_be_bytes(len); + + if len > 
MAX_VALUE_SIZE { + return Err("Illegal list type".into()); + } + + let (list_type, _entry_type) = match expected_type.as_ref() { + None => (None, None), + Some(TypeSignature::SequenceType(SequenceSubtype::ListType(list_type))) => { + if len > list_type.get_max_len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + (Some(list_type), Some(list_type.get_list_item_type())) + } + Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); + + if len > 0 { + let items = Vec::with_capacity(len as usize); + let stack_item = DeserializeStackItem::List { + items, + expected_len: len, + expected_type: list_type.cloned(), + }; + + stack.push(stack_item); + continue; + } else { + let finished_list = if let Some(list_type) = list_type { + Value::list_with_type( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + vec![], + list_type.clone(), + ) + .map_err(|_| "Illegal list type")? + } else { + Value::cons_list_unsanitized(vec![]).map_err(|_| "Illegal list type")? 
+ }; + + Ok(finished_list) } } + TypePrefix::Tuple => { + let mut len = [0; 4]; + r.read_exact(&mut len)?; + let len = u32::from_be_bytes(len); + let expected_len = u64::from(len); + + if len > MAX_VALUE_SIZE { + return Err(SerializationError::DeserializationError( + "Illegal tuple type".to_string(), + )); + } - let mut data = vec![0; u32::from(buffer_len) as usize]; - - r.read_exact(&mut data[..])?; + let tuple_type = match expected_type.as_ref() { + None => None, + Some(TypeSignature::TupleType(tuple_type)) => { + if sanitize { + if u64::from(len) < tuple_type.len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + } else { + if len as u64 != tuple_type.len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + } + Some(tuple_type) + } + Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), + }; - Value::string_ascii_from_bytes(data).map_err(|_| "Bad string".into()) - } - TypePrefix::StringUTF8 => { - let mut total_len = [0; 4]; - r.read_exact(&mut total_len)?; - let total_len = BufferLength::try_from(u32::from_be_bytes(total_len))?; + if len > 0 { + let items = Vec::with_capacity(expected_len as usize); + let first_key = ClarityName::deserialize_read(r)?; + let next_sanitize = sanitize + && tuple_type + .map(|tt| tt.field_type(&first_key).is_none()) + .unwrap_or(false); + let stack_item = DeserializeStackItem::Tuple { + items, + expected_len, + processed_entries: 0, + expected_type: tuple_type.cloned(), + next_name: first_key, + next_sanitize, + }; + + stack.push(stack_item); + continue; + } else { + let finished_tuple = if let Some(tuple_type) = tuple_type { + TupleData::from_data_typed( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + vec![], + &tuple_type, + ) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + } else { + TupleData::from_data(vec![]) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? 
+ }; + Ok(finished_tuple) + } + } + TypePrefix::StringASCII => { + let mut buffer_len = [0; 4]; + r.read_exact(&mut buffer_len)?; + let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - let mut data: Vec = vec![0; u32::from(total_len) as usize]; + if let Some(x) = &expected_type { + let passed_test = match x { + TypeSignature::SequenceType(SequenceSubtype::StringType( + StringSubtype::ASCII(expected_len), + )) => u32::from(&buffer_len) <= u32::from(expected_len), + _ => false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } + } - r.read_exact(&mut data[..])?; + let mut data = vec![0; u32::from(buffer_len) as usize]; - let value = Value::string_utf8_from_bytes(data) - .map_err(|_| "Illegal string_utf8 type".into()); + r.read_exact(&mut data[..])?; - if let Some(x) = expected_type { - let passed_test = match (x, &value) { - ( - TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(expected_len), - )), - Ok(Value::Sequence(SequenceData::String(CharType::UTF8(utf8)))), - ) => utf8.data.len() as u32 <= u32::from(expected_len), - _ => false, - }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); + Value::string_ascii_from_bytes(data).map_err(|_| "Bad string".into()) + } + TypePrefix::StringUTF8 => { + let mut total_len = [0; 4]; + r.read_exact(&mut total_len)?; + let total_len = BufferLength::try_from(u32::from_be_bytes(total_len))?; + + let mut data: Vec = vec![0; u32::from(total_len) as usize]; + + r.read_exact(&mut data[..])?; + + let value = Value::string_utf8_from_bytes(data) + .map_err(|_| "Illegal string_utf8 type".into()); + + if let Some(x) = &expected_type { + let passed_test = match (x, &value) { + ( + TypeSignature::SequenceType(SequenceSubtype::StringType( + StringSubtype::UTF8(expected_len), + )), + Ok(Value::Sequence(SequenceData::String(CharType::UTF8(utf8)))), + ) => utf8.data.len() as u32 <= u32::from(expected_len), + _ => 
false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } } + + value } + }?; - value + let mut finished_item = Some(item); + while let Some(item) = finished_item.take() { + let stack_bottom = if let Some(stack_item) = stack.pop() { + stack_item + } else { + // this should be unreachable! + return Ok(item); + }; + match stack_bottom { + DeserializeStackItem::TopLevel { .. } => return Ok(item), + DeserializeStackItem::List { + mut items, + expected_len, + expected_type, + } => { + items.push(item); + if expected_len as usize <= items.len() { + // list is finished! + let finished_list = if let Some(list_type) = expected_type { + Value::list_with_type( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + items, + list_type.clone(), + ) + .map_err(|_| "Illegal list type")? + } else { + Value::cons_list_unsanitized(items) + .map_err(|_| "Illegal list type")? + }; + + finished_item.replace(finished_list); + } else { + // list is not finished, reinsert on stack + stack.push(DeserializeStackItem::List { + items, + expected_len, + expected_type, + }); + } + } + DeserializeStackItem::Tuple { + mut items, + expected_len, + expected_type, + next_name, + next_sanitize, + mut processed_entries, + } => { + let push_entry = if sanitize { + if let Some(_) = expected_type.as_ref() { + // if performing tuple sanitization, don't include a field + // if it was sanitized + !next_sanitize + } else { + // always push the entry if there's no type expectation + true + } + } else { + true + }; + let tuple_entry = (next_name, item); + if push_entry { + items.push(tuple_entry); + } + processed_entries += 1; + if expected_len <= processed_entries { + // tuple is finished! 
+ let finished_tuple = if let Some(tuple_type) = expected_type { + if items.len() != tuple_type.len() as usize { + return Err(SerializationError::DeserializeExpected( + TypeSignature::TupleType(tuple_type), + )); + } + TupleData::from_data_typed( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + items, + &tuple_type, + ) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + } else { + TupleData::from_data(items) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + }; + + finished_item.replace(finished_tuple); + } else { + // tuple is not finished, read the next key name and reinsert on stack + let key = ClarityName::deserialize_read(r)?; + let next_sanitize = sanitize + && expected_type + .as_ref() + .map(|tt| tt.field_type(&key).is_none()) + .unwrap_or(false); + stack.push(DeserializeStackItem::Tuple { + items, + expected_type, + expected_len, + next_name: key, + next_sanitize, + processed_entries, + }); + } + } + DeserializeStackItem::OptionSome { .. } => { + let finished_some = Value::some(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + DeserializeStackItem::ResponseOk { .. } => { + let finished_some = Value::okay(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + DeserializeStackItem::ResponseErr { .. } => { + let finished_some = Value::error(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + }; } } + + Err(SerializationError::DeserializationError( + "Invalid data: stack ran out before finishing parsing".into(), + )) } pub fn serialize_write(&self, w: &mut W) -> std::io::Result<()> { @@ -790,8 +1051,9 @@ impl Value { pub fn try_deserialize_bytes( bytes: &Vec, expected: &TypeSignature, + sanitize: bool, ) -> Result { - Value::deserialize_read(&mut bytes.as_slice(), Some(expected)) + Value::deserialize_read(&mut bytes.as_slice(), Some(expected), sanitize) } /// This function attempts to deserialize a hex string into a Clarity Value. 
@@ -801,9 +1063,10 @@ impl Value { pub fn try_deserialize_hex( hex: &str, expected: &TypeSignature, + sanitize: bool, ) -> Result { let mut data = hex_bytes(hex).map_err(|_| "Bad hex string")?; - Value::try_deserialize_bytes(&mut data, expected) + Value::try_deserialize_bytes(&mut data, expected, sanitize) } /// This function attempts to deserialize a byte buffer into a @@ -817,10 +1080,11 @@ impl Value { pub fn try_deserialize_bytes_exact( bytes: &Vec, expected: &TypeSignature, + sanitize: bool, ) -> Result { let input_length = bytes.len(); let (value, read_count) = - Value::deserialize_read_count(&mut bytes.as_slice(), Some(expected))?; + Value::deserialize_read_count(&mut bytes.as_slice(), Some(expected), sanitize)?; if read_count != (input_length as u64) { Err(SerializationError::LeftoverBytesInDeserialization) } else { @@ -828,8 +1092,10 @@ impl Value { } } - pub fn try_deserialize_bytes_untyped(bytes: &Vec) -> Result { - Value::deserialize_read(&mut bytes.as_slice(), None) + /// Try to deserialize a value without type information. This *does not* perform sanitization + /// so it should not be used when decoding clarity database values. 
+ fn try_deserialize_bytes_untyped(bytes: &Vec) -> Result { + Value::deserialize_read(&mut bytes.as_slice(), None, false) } pub fn try_deserialize_hex_untyped(hex: &str) -> Result { @@ -842,11 +1108,6 @@ impl Value { Value::try_deserialize_bytes_untyped(&mut data) } - pub fn deserialize(hex: &str, expected: &TypeSignature) -> Self { - Value::try_deserialize_hex(hex, expected) - .expect("ERROR: Failed to parse Clarity hex string") - } - pub fn serialized_size(&self) -> u32 { let mut counter = WriteCounter { count: 0 }; self.serialize_write(&mut counter) @@ -882,18 +1143,123 @@ impl Write for WriteCounter { } } -impl ClaritySerializable for Value { - fn serialize(&self) -> String { +impl Value { + pub fn serialize_to_vec(&self) -> Vec { let mut byte_serialization = Vec::new(); self.serialize_write(&mut byte_serialization) .expect("IOError filling byte buffer."); + byte_serialization + } + + /// This does *not* perform any data sanitization + pub fn serialize_to_hex(&self) -> String { + let byte_serialization = self.serialize_to_vec(); to_hex(byte_serialization.as_slice()) } -} -impl ClarityDeserializable for Value { - fn deserialize(hex: &str) -> Self { - Value::try_deserialize_hex_untyped(hex).expect("ERROR: Failed to parse Clarity hex string") + /// Sanitize `value` against pre-2.4 serialization + /// + /// Returns Some if the sanitization is successful, or was not necessary. + /// Returns None if the sanitization failed. + /// + /// Returns the sanitized value _and_ whether or not sanitization was required. 
+ pub fn sanitize_value( + epoch: &StacksEpochId, + expected: &TypeSignature, + value: Value, + ) -> Option<(Value, bool)> { + // in epochs before 2.4, perform no sanitization + if !epoch.value_sanitizing() { + return Some((value, false)); + } + let (output, did_sanitize) = match value { + Value::Sequence(SequenceData::List(l)) => { + let lt = match expected { + TypeSignature::SequenceType(SequenceSubtype::ListType(lt)) => lt, + _ => return None, + }; + if l.len() > lt.get_max_len() { + return None; + } + let mut sanitized_items = vec![]; + let mut did_sanitize_children = false; + for item in l.data.into_iter() { + let (sanitized_item, did_sanitize) = + Self::sanitize_value(epoch, lt.get_list_item_type(), item)?; + sanitized_items.push(sanitized_item); + did_sanitize_children = did_sanitize_children || did_sanitize; + } + // do not sanitize list before construction here, because we're already sanitizing + let output_list = Value::cons_list_unsanitized(sanitized_items).ok()?; + (output_list, did_sanitize_children) + } + Value::Tuple(tuple_data) => { + let tt = match expected { + TypeSignature::TupleType(tt) => tt, + _ => return None, + }; + let mut sanitized_tuple_entries = vec![]; + let tuple_data_len = tuple_data.len(); + let mut tuple_data_map = tuple_data.data_map; + let mut did_sanitize_children = false; + for (key, expect_key_type) in tt.get_type_map().iter() { + let field_data = tuple_data_map.remove(key)?; + let (sanitized_field, did_sanitize) = + Self::sanitize_value(epoch, expect_key_type, field_data)?; + sanitized_tuple_entries.push((key.clone(), sanitized_field)); + did_sanitize_children = did_sanitize_children || did_sanitize; + } + let did_sanitize_tuple = did_sanitize_children || (tt.len() != tuple_data_len); + ( + Value::Tuple(TupleData::from_data(sanitized_tuple_entries).ok()?), + did_sanitize_tuple, + ) + } + Value::Optional(opt_data) => { + let inner_type = match expected { + TypeSignature::OptionalType(inner_type) => inner_type, + _ => return 
None, + }; + let some_data = match opt_data.data { + Some(data) => *data, + None => return Some((Value::none(), false)), + }; + let (sanitized_data, did_sanitize_child) = + Self::sanitize_value(epoch, &inner_type, some_data)?; + (Value::some(sanitized_data).ok()?, did_sanitize_child) + } + Value::Response(response) => { + let rt = match expected { + TypeSignature::ResponseType(rt) => rt, + _ => return None, + }; + + let response_ok = response.committed; + let response_data = *response.data; + let inner_type = if response_ok { &rt.0 } else { &rt.1 }; + let (sanitized_inner, did_sanitize_child) = + Self::sanitize_value(epoch, &inner_type, response_data)?; + let sanitized_resp = if response_ok { + Value::okay(sanitized_inner) + } else { + Value::error(sanitized_inner) + }; + (sanitized_resp.ok()?, did_sanitize_child) + } + value => { + if expected.admits(epoch, &value).ok()? { + return Some((value, false)); + } else { + return None; + } + } + }; + + if expected.admits(epoch, &output).ok()? { + Some((output, did_sanitize)) + } else { + None + } } } @@ -919,13 +1285,15 @@ impl ClarityDeserializable for u32 { } } +/// Note: the StacksMessageCodec implementation for Clarity values *does not* +/// sanitize its serialization or deserialization. 
impl StacksMessageCodec for Value { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { self.serialize_write(fd).map_err(codec_error::WriteError) } fn consensus_deserialize(fd: &mut R) -> Result { - Value::deserialize_read(fd, None).map_err(|e| match e { + Value::deserialize_read(fd, None, false).map_err(|e| match e { SerializationError::IOError(e) => codec_error::ReadError(e.err), _ => codec_error::DeserializeError(format!("Failed to decode clarity value: {:?}", &e)), }) @@ -935,7 +1303,7 @@ impl StacksMessageCodec for Value { impl std::hash::Hash for Value { fn hash(&self, state: &mut H) { let mut s = vec![]; - self.consensus_serialize(&mut s) + self.serialize_write(&mut s) .expect("FATAL: failed to serialize to vec"); s.hash(state); } @@ -948,7 +1316,7 @@ mod tests { use std::io::Write; - use crate::vm::database::{ClarityDeserializable, ClaritySerializable}; + use crate::vm::database::{ClarityDeserializable, ClaritySerializable, RollbackWrapper}; use crate::vm::errors::Error; use crate::vm::types::TypeSignature::{BoolType, IntType}; @@ -981,16 +1349,17 @@ mod tests { fn test_deser_ser(v: Value) { assert_eq!( &v, - &Value::deserialize(&v.serialize(), &TypeSignature::type_of(&v)) + &Value::try_deserialize_hex(&v.serialize_to_hex(), &TypeSignature::type_of(&v), false) + .unwrap() ); assert_eq!( &v, - &Value::try_deserialize_hex_untyped(&v.serialize()).unwrap() + &Value::try_deserialize_hex_untyped(&v.serialize_to_hex()).unwrap() ); // test the serialized_size implementation assert_eq!( v.serialized_size(), - v.serialize().len() as u32 / 2, + v.serialize_to_hex().len() as u32 / 2, "serialized_size() should return the byte length of the serialization (half the length of the hex encoding)", ); } @@ -1001,7 +1370,7 @@ mod tests { fn test_bad_expectation(v: Value, e: TypeSignature) { assert!( - match Value::try_deserialize_hex(&v.serialize(), &e).unwrap_err() { + match Value::try_deserialize_hex(&v.serialize_to_hex(), &e, false).unwrap_err() { 
SerializationError::DeserializeExpected(_) => true, _ => false, } @@ -1031,18 +1400,21 @@ mod tests { // Should be legal! Value::try_deserialize_hex( - &Value::list_from(vec![]).unwrap().serialize(), + &Value::list_from(vec![]).unwrap().serialize_to_hex(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), + false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize(), + &list_list_int.serialize_to_hex(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), + false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize(), + &list_list_int.serialize_to_hex(), &TypeSignature::from_string("(list 1 (list 4 int))", version, epoch), + false, ) .unwrap(); @@ -1078,7 +1450,7 @@ mod tests { .unwrap(); assert_eq!( - Value::deserialize_read(&mut too_big.as_slice(), None).unwrap_err(), + Value::deserialize_read(&mut too_big.as_slice(), None, false).unwrap_err(), "Illegal list type".into() ); @@ -1101,7 +1473,7 @@ mod tests { "Unexpected end of byte stream".into()); */ - match Value::deserialize_read(&mut eof.as_slice(), None) { + match Value::deserialize_read(&mut eof.as_slice(), None, false) { Ok(_) => assert!(false, "Accidentally parsed truncated slice"), Err(eres) => match eres { SerializationError::IOError(ioe) => match ioe.err.kind() { @@ -1263,39 +1635,291 @@ mod tests { // t_0 and t_1 are actually the same assert_eq!( - Value::try_deserialize_hex(&t_1.serialize(), &TypeSignature::type_of(&t_0)).unwrap(), - Value::try_deserialize_hex(&t_0.serialize(), &TypeSignature::type_of(&t_0)).unwrap() + Value::try_deserialize_hex( + &t_1.serialize_to_hex(), + &TypeSignature::type_of(&t_0), + false + ) + .unwrap(), + Value::try_deserialize_hex( + &t_0.serialize_to_hex(), + &TypeSignature::type_of(&t_0), + false + ) + .unwrap() ); // field number not equal to expectations - assert!( - match Value::try_deserialize_hex(&t_3.serialize(), &TypeSignature::type_of(&t_1)) - .unwrap_err() - { - 
SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ); + assert!(match Value::try_deserialize_hex( + &t_3.serialize_to_hex(), + &TypeSignature::type_of(&t_1), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field type mismatch - assert!( - match Value::try_deserialize_hex(&t_2.serialize(), &TypeSignature::type_of(&t_1)) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ); + assert!(match Value::try_deserialize_hex( + &t_2.serialize_to_hex(), + &TypeSignature::type_of(&t_1), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field not-present in expected - assert!( - match Value::try_deserialize_hex(&t_1.serialize(), &TypeSignature::type_of(&t_4)) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } + assert!(match Value::try_deserialize_hex( + &t_1.serialize_to_hex(), + &TypeSignature::type_of(&t_4), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); + } + + #[apply(test_clarity_versions_serialization)] + fn test_sanitization(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { + let v_1 = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(); + let v_1_good = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::Int(4))]) + .unwrap() + .into(), + ]) + .unwrap(); + + let t_1_good = TypeSignature::from_string("(list 5 (tuple (b int)))", version, epoch); + let t_1_bad_0 = + TypeSignature::from_string("(list 5 (tuple (b int) (a int)))", version, epoch); + let t_1_bad_1 = 
TypeSignature::from_string("(list 5 (tuple (b uint)))", version, epoch); + + let v_2 = TupleData::from_data(vec![ + ( + "list-1".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ( + "list-2".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("c".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ]) + .unwrap() + .into(); + + let v_2_good = TupleData::from_data(vec![ + ( + "list-1".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::Int(4))]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ( + "list-2".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("c".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("c".into(), Value::Int(3))]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ]) + .unwrap() + .into(); + + let t_2_good = TypeSignature::from_string( + "(tuple (list-2 (list 2 (tuple (c int)))) (list-1 (list 5 (tuple (b int)))))", + version, + epoch, ); + let t_2_bad_0 = TypeSignature::from_string( + "(tuple (list-2 (list 2 (tuple (c int)))) (list-1 (list 5 (tuple (a int)))))", + version, + epoch, + ); + let t_2_bad_1 = TypeSignature::from_string( + "(tuple (list-2 (list 1 (tuple (c int)))) (list-1 (list 5 (tuple (b int)))))", + version, + epoch, + ); + + let v_3 = Value::some( + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ) + .unwrap(); + + let v_3_good = Value::some( + TupleData::from_data(vec![ + 
("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ]) + .unwrap() + .into(), + ) + .unwrap(); + + let t_3_good = + TypeSignature::from_string("(optional (tuple (a int) (b int)))", version, epoch); + let t_3_bad_0 = + TypeSignature::from_string("(optional (tuple (a uint) (b int)))", version, epoch); + let t_3_bad_1 = + TypeSignature::from_string("(optional (tuple (d int) (b int)))", version, epoch); + + let v_4 = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(); + let v_4_good = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(); + + let t_4_good = + TypeSignature::from_string("(list 5 (tuple (b (optional int))))", version, epoch); + let t_4_bad_0 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + let t_4_bad_1 = + TypeSignature::from_string("(list 5 (tuple (b (optional uint))))", version, epoch); + + let test_cases = [ + (v_1, v_1_good, t_1_good, vec![t_1_bad_0, t_1_bad_1]), + (v_2, v_2_good, t_2_good, vec![t_2_bad_0, t_2_bad_1]), + (v_3, v_3_good, t_3_good, vec![t_3_bad_0, t_3_bad_1]), + (v_4, v_4_good, t_4_good, vec![t_4_bad_0, t_4_bad_1]), + ]; + + for (input_val, expected_out, good_type, bad_types) in test_cases.iter() { + eprintln!( + "Testing {}. 
Expected sanitization = {}", + input_val, expected_out + ); + let serialized = input_val.serialize_to_hex(); + + let result = + RollbackWrapper::deserialize_value(&serialized, good_type, &epoch).map(|x| x.value); + if epoch < StacksEpochId::Epoch24 { + let error = result.unwrap_err(); + match error { + SerializationError::DeserializeExpected(_) => {} + _ => panic!("Expected a DeserializeExpected error"), + } + } else { + let value = result.unwrap(); + assert_eq!(&value, expected_out); + } + + for bad_type in bad_types.iter() { + eprintln!("Testing bad type: {}", bad_type); + let result = RollbackWrapper::deserialize_value(&serialized, bad_type, &epoch); + let error = result.unwrap_err(); + match error { + SerializationError::DeserializeExpected(_) => {} + e => panic!("Expected a DeserializeExpected error, got = {}", e), + } + } + + // now test the value::sanitize routine + let result = Value::sanitize_value(&epoch, good_type, input_val.clone()); + if epoch < StacksEpochId::Epoch24 { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, input_val); + assert!(!did_sanitize, "Should not sanitize before epoch-2.4"); + } else { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, expected_out); + assert!(did_sanitize, "Should have sanitized"); + } + + for bad_type in bad_types.iter() { + eprintln!("Testing bad type: {}", bad_type); + let result = Value::sanitize_value(&epoch, bad_type, input_val.clone()); + if epoch < StacksEpochId::Epoch24 { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, input_val); + assert!(!did_sanitize, "Should not sanitize before epoch-2.4"); + } else { + assert!(result.is_none()); + } + } + } } #[test] @@ -1335,7 +1959,7 @@ mod tests { for (test, expected) in tests.iter() { if let Ok(x) = expected { - assert_eq!(test, &x.serialize()); + assert_eq!(test, &x.serialize_to_hex()); } assert_eq!(expected, &Value::try_deserialize_hex_untyped(test)); assert_eq!( diff --git 
a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 79b56223a0..47cc94eb2c 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -849,6 +849,7 @@ impl TryFrom> for TupleTypeSignature { } impl TupleTypeSignature { + /// Return the number of fields in this tuple type pub fn len(&self) -> u64 { self.type_map.len() as u64 } diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index e76cf5fe5c..61f8467309 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -121,10 +121,12 @@ pub fn get_stacking_state_pox( let lookup_tuple = Value::Tuple( TupleData::from_data(vec![("stacker".into(), account.clone().into())]).unwrap(), ); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( &boot_code_id(pox_contract, false), "stacking-state", &lookup_tuple, + &epoch, ) .unwrap() .expect_optional() @@ -378,10 +380,12 @@ pub fn check_stacking_state_invariants( .unwrap(), ); let entry_value = with_clarity_db_ro(peer, tip, |db| { + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( &boot_code_id(active_pox_contract, false), "reward-cycle-pox-address-list", - &entry_key + &entry_key, + &epoch, ) .unwrap() .expect_optional() @@ -560,10 +564,12 @@ pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_nu )]) .unwrap() .into(); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( &boot_code_id(active_pox_contract, false), "reward-cycle-total-stacked", &total_stacked_key, + &epoch, ) .map(|v| { v.expect_optional() @@ -598,10 +604,12 @@ pub fn get_partial_stacked( ]) .unwrap() .into(); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( &boot_code_id(pox_contract, false), "partial-stacked-by-cycle", &key, + &epoch, ) .map(|v| { v.expect_optional() diff --git a/src/chainstate/stacks/db/blocks.rs 
b/src/chainstate/stacks/db/blocks.rs index 2bc21b30e8..cc52346682 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -5355,12 +5355,14 @@ impl StacksChainState { clarity_tx .connection() .as_transaction(|tx_connection| { + let epoch = tx_connection.get_epoch(); let result = tx_connection.with_clarity_db(|db| { let block_height = Value::UInt(db.get_current_block_height().into()); let res = db.fetch_entry_unknown_descriptor( &lockup_contract_id, "lockups", &block_height, + &epoch, )?; Ok(res) })?; diff --git a/src/chainstate/stacks/db/contracts.rs b/src/chainstate/stacks/db/contracts.rs index 14f9f08025..6b46fa4479 100644 --- a/src/chainstate/stacks/db/contracts.rs +++ b/src/chainstate/stacks/db/contracts.rs @@ -72,9 +72,10 @@ impl StacksChainState { contract_id: &QualifiedContractIdentifier, data_var: &str, ) -> Result, Error> { + let epoch = clarity_tx.get_epoch(); clarity_tx .with_clarity_db_readonly(|ref mut db| { - match db.lookup_variable_unknown_descriptor(contract_id, data_var) { + match db.lookup_variable_unknown_descriptor(contract_id, data_var, &epoch) { Ok(c) => Ok(Some(c)), Err(clarity_vm_error::Unchecked(CheckErrors::NoSuchDataVariable(_))) => { Ok(None) diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index f6beeac6ad..abcb84bb91 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -1299,16 +1299,18 @@ impl StacksChainState { } let lockup_contract_id = boot_code_id("lockup", mainnet); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { for (block_height, schedule) in lockups_per_block.into_iter() { let key = Value::UInt(block_height.into()); - let value = Value::list_from(schedule).unwrap(); + let value = Value::cons_list(schedule, &epoch).unwrap(); db.insert_entry_unknown_descriptor( &lockup_contract_id, "lockups", key, value, + &epoch, )?; } Ok(()) @@ -1320,6 +1322,7 @@ impl StacksChainState { let bns_contract_id = 
boot_code_id("bns", mainnet); if let Some(get_namespaces) = boot_data.get_bulk_initial_namespaces.take() { info!("Initializing chain with namespaces"); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { let initial_namespaces = get_namespaces(); @@ -1358,7 +1361,10 @@ impl StacksChainState { assert_eq!(buckets.len(), 16); TupleData::from_data(vec![ - ("buckets".into(), Value::list_from(buckets).unwrap()), + ( + "buckets".into(), + Value::cons_list(buckets, &epoch).unwrap(), + ), ("base".into(), base), ("coeff".into(), coeff), ("nonalpha-discount".into(), nonalpha_discount), @@ -1384,6 +1390,7 @@ impl StacksChainState { "namespaces", namespace, namespace_props, + &epoch, )?; } Ok(()) @@ -1394,6 +1401,7 @@ impl StacksChainState { // BNS Names if let Some(get_names) = boot_data.get_bulk_initial_names.take() { info!("Initializing chain with names"); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { let initial_names = get_names(); @@ -1449,6 +1457,7 @@ impl StacksChainState { &fqn, &owner_address, &expected_asset_type, + &epoch, )?; let registered_at = Value::UInt(0); @@ -1470,6 +1479,7 @@ impl StacksChainState { "name-properties", fqn.clone(), name_props, + &epoch, )?; db.insert_entry_unknown_descriptor( @@ -1477,6 +1487,7 @@ impl StacksChainState { "owner-name", Value::Principal(owner_address), fqn, + &epoch, )?; } Ok(()) diff --git a/src/clarity_vm/tests/costs.rs b/src/clarity_vm/tests/costs.rs index 976f476ce4..8e12c92cd1 100644 --- a/src/clarity_vm/tests/costs.rs +++ b/src/clarity_vm/tests/costs.rs @@ -1190,11 +1190,13 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity confirmed-height: u1 }}", intercepted, "\"intercepted-function\"", cost_definer, "\"cost-definition\"" ); + let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( voting_contract_to_use, "confirmed-proposals", execute_on_network("{ confirmed-id: u0 }", use_mainnet), execute_on_network(&value, use_mainnet), + 
&epoch, ) .unwrap(); db.commit(); @@ -1504,11 +1506,13 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); + let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( &COST_VOTING_TESTNET_CONTRACT, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix)), execute(&value), + &epoch, ) .unwrap(); } @@ -1602,11 +1606,13 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); + let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( &COST_VOTING_TESTNET_CONTRACT, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix + bad_proposals)), execute(&value), + &epoch, ) .unwrap(); } diff --git a/src/main.rs b/src/main.rs index fcf8abb7e2..10ea712cbe 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1005,6 +1005,59 @@ simulating a miner. 
return; } + if argv[1] == "deserialize-db" { + if argv.len() < 4 { + eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]); + process::exit(1); + } + let db_path = &argv[2]; + let byte_prefix = &argv[3]; + let conn = Connection::open_with_flags(db_path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); + let query = format!( + "SELECT value FROM data_table WHERE key LIKE \"{}%\"", + byte_prefix + ); + let mut stmt = conn.prepare(&query).unwrap(); + let mut rows = stmt.query(rusqlite::NO_PARAMS).unwrap(); + while let Ok(Some(row)) = rows.next() { + let val_string: String = row.get(0).unwrap(); + let clarity_value = match clarity::vm::Value::try_deserialize_hex_untyped(&val_string) { + Ok(x) => x, + Err(_e) => continue, + }; + println!("{} => {}", val_string, clarity_value); + } + + process::exit(0); + } + + if argv[1] == "check-deser-data" { + if argv.len() < 3 { + eprintln!("Usage: {} check-file.txt", &argv[0]); + process::exit(1); + } + let txt_path = &argv[2]; + let check_file = File::open(txt_path).unwrap(); + let mut i = 1; + for line in io::BufReader::new(check_file).lines() { + if i % 100000 == 0 { + println!("{}...", i); + } + i += 1; + let line = line.unwrap().trim().to_string(); + if line.len() == 0 { + continue; + } + let vals: Vec<_> = line.split(" => ").map(|x| x.trim()).collect(); + let hex_string = &vals[0]; + let expected_value_display = &vals[1]; + let value = clarity::vm::Value::try_deserialize_hex_untyped(&hex_string).unwrap(); + assert_eq!(&value.to_string(), expected_value_display); + } + + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 628921e51a..24a2136625 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -1363,15 +1363,15 @@ impl ConversationHttp { var_name, ); - let (value, marf_proof) = if with_proof { + let 
(value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_with_proof::(&key) + .get_with_proof(&key) .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { - clarity_db.get::(&key).map(|a| (a, None))? + clarity_db.get(&key).map(|a| (a, None))? }; - let data = format!("0x{}", value.serialize()); + let data = format!("0x{}", value_hex); Some(DataVarResponse { data, marf_proof }) }) }) { @@ -1416,25 +1416,22 @@ impl ConversationHttp { map_name, key, ); - let (value, marf_proof) = if with_proof { + let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_with_proof::(&key) + .get_with_proof(&key) .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| { test_debug!("No value for '{}' in {}", &key, tip); - (Value::none(), Some("".into())) + (Value::none().serialize_to_hex(), Some("".into())) }) } else { - clarity_db - .get::(&key) - .map(|a| (a, None)) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none(), None) - }) + clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), None) + }) }; - let data = format!("0x{}", value.serialize()); + let data = format!("0x{}", value_hex); MapEntryResponse { data, marf_proof } }) }) { @@ -1528,7 +1525,7 @@ impl ConversationHttp { response_metadata, CallReadOnlyResponse { okay: true, - result: Some(format!("0x{}", data.serialize())), + result: Some(format!("0x{}", data.serialize_to_hex())), cause: None, }, ), diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 448fdc3937..3c934b1956 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -81,6 +81,12 @@ impl StacksEpochId { pub fn latest() -> StacksEpochId { StacksEpochId::Epoch24 } + + /// Returns whether or not this Epoch should perform + /// Clarity value sanitization + pub fn value_sanitizing(&self) -> bool { + self >= 
&StacksEpochId::Epoch24 + } } impl std::fmt::Display for StacksEpochId { diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 4243835a66..50b4fc2920 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -444,9 +444,12 @@ fn transition_adds_burn_block_height() { .unwrap(), ) .unwrap(); - let clarity_value = - Value::deserialize_read(&mut &clarity_serialized_value[..], None) - .unwrap(); + let clarity_value = Value::deserialize_read( + &mut &clarity_serialized_value[..], + None, + false, + ) + .unwrap(); let pair = clarity_value.expect_tuple(); let height = pair.get("height").unwrap().clone().expect_u128() as u64; let bhh_opt = @@ -1273,9 +1276,12 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(), ) .unwrap(); - let clarity_value = - Value::deserialize_read(&mut &clarity_serialized_value[..], None) - .unwrap(); + let clarity_value = Value::deserialize_read( + &mut &clarity_serialized_value[..], + None, + false, + ) + .unwrap(); let pair = clarity_value.expect_tuple(); let burn_block_height = pair.get("burn-height").unwrap().clone().expect_u128() as u64; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 43834e40e2..5fe4efd252 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -26,7 +26,6 @@ use stacks::vm::{ contract_interface_builder::{build_contract_interface, ContractInterface}, mem_type_check, }, - database::ClaritySerializable, types::{QualifiedContractIdentifier, ResponseData, TupleData}, Value, }; @@ -492,7 +491,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); @@ -507,7 +506,7 @@ fn 
integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); @@ -524,7 +523,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); @@ -545,7 +544,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); @@ -673,7 +672,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(3).serialize()] + arguments: vec![Value::UInt(3).serialize_to_hex()] }; let res = client.post(&path) @@ -741,7 +740,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(3).serialize()] + arguments: vec![Value::UInt(3).serialize_to_hex()] }; let res = client.post(&path) @@ -764,7 +763,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(100).serialize()] + arguments: vec![Value::UInt(100).serialize_to_hex()] }; let res = client.post(&path) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 202c55f8a2..a197523af6 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -42,7 +42,6 @@ use stacks::util::hash::{bytes_to_hex, hex_bytes, to_hex}; use stacks::util::secp256k1::Secp256k1PublicKey; use 
stacks::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; use stacks::util_lib::boot::boot_code_id; -use stacks::vm::database::ClarityDeserializable; use stacks::vm::types::PrincipalData; use stacks::vm::ClarityVersion; use stacks::vm::Value; @@ -1369,7 +1368,7 @@ fn liquid_ustx_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "execute" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); let liquid_ustx = parsed.expect_result_ok().expect_u128(); assert!(liquid_ustx > 0, "Should be more liquid ustx than 0"); tested = true; @@ -4651,7 +4650,7 @@ fn cost_voting_integration() { serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); } else if contract_call.function_name.as_str() == "propose-vote-confirm" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); assert_eq!(parsed.to_string(), "(ok u0)"); tested = true; } @@ -4697,7 +4696,7 @@ fn cost_voting_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "confirm-miners" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); assert_eq!(parsed.to_string(), "(err 13)"); tested = true; } @@ -4746,7 +4745,7 @@ fn cost_voting_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "confirm-miners" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); 
assert_eq!(parsed.to_string(), "(ok true)"); tested = true; } @@ -6001,8 +6000,7 @@ fn pox_integration_test() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "stack-stx" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = - >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) From eb8b2cb752eca640340866a683e09b1ff864fdcc Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 15 May 2023 10:13:29 -0500 Subject: [PATCH 138/158] fix pox_3 auto_unlock tests --- src/chainstate/stacks/boot/pox_3_tests.rs | 84 ++++++++++++++--------- stacks-common/src/util/macros.rs | 3 - 2 files changed, 51 insertions(+), 36 deletions(-) diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index b78e563fcd..2418b5bf64 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -192,7 +192,7 @@ fn simple_pox_lockup_transition_pox_2() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_tests::simple_pox_lockup_transition_pox_2", + function_name!(), 7104, Some(epochs.clone()), Some(&observer), @@ -540,30 +540,29 @@ fn simple_pox_lockup_transition_pox_2() { } #[test] -fn test_simple_pox_2_auto_unlock_ab() { - test_simple_pox_2_auto_unlock(true) +fn pox_auto_unlock_ab() { + pox_auto_unlock(true) } #[test] -fn test_simple_pox_2_auto_unlock_ba() { - test_simple_pox_2_auto_unlock(false) +fn pox_auto_unlock_ba() { + pox_auto_unlock(false) } /// In this test case, two Stackers, Alice and Bob stack and interact with the -/// PoX v1 contract and PoX v2 contract across the epoch transition. 
+/// PoX v1 contract and PoX v2 contract across the epoch transition, and then again +/// in PoX v3. /// /// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after /// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". /// After the early unlock, Alice re-stacks in PoX v2 -/// Alice tries to stack again via PoX v1, which is allowed by the contract, -/// but forbidden by the VM (because PoX has transitioned to v2) /// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, /// but is forbidden because he has already placed an account lock via PoX v2. /// /// Note: this test is symmetric over the order of alice and bob's stacking calls. /// when alice goes first, the auto-unlock code doesn't need to perform a "move" /// when bob goes first, the auto-unlock code does need to perform a "move" -fn test_simple_pox_2_auto_unlock(alice_first: bool) { +fn pox_auto_unlock(alice_first: bool) { let EXPECTED_FIRST_V2_CYCLE = 8; // the sim environment produces 25 empty sortitions before // tenures start being tracked. 
@@ -595,8 +594,8 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &format!("pox_3_tests::simple_pox_auto_unlock_{}", alice_first), - 7102, + &format!("{}-{}", function_name!(), alice_first), + 7102 + if alice_first { 0 } else { 20 }, Some(epochs.clone()), Some(&observer), ); @@ -697,20 +696,34 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { ); } - // now check that bob has no locked tokens at (height_target + 1) + // now check that bob has an unlock height of `height_target` let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ); - assert_eq!(bob_bal.amount_locked(), 0); + assert_eq!(bob_bal.unlock_height(), height_target); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block + assert_eq!(bob_bal.amount_locked(), 10000000000); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob is fully unlocked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ); + assert_eq!(bob_bal.unlock_height(), 0); assert_eq!(bob_bal.amount_locked(), 0); // check that the total reward cycle amounts have decremented correctly @@ -840,21 +853,15 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { ); } - // now check that bob has no locked tokens at (height_target + 1) + // now check that bob has an unlock height of `height_target` let bob_bal = get_stx_account_at( &mut peer, &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ); - assert_eq!(bob_bal.amount_locked(), 0); - + assert_eq!(bob_bal.unlock_height(), 
height_target); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &key_to_stacks_addr(&bob).to_account_principal(), - ); - assert_eq!(bob_bal.amount_locked(), 0); + assert_eq!(bob_bal.amount_locked(), 10000000000); // check that the total reward cycle amounts have decremented correctly for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { @@ -887,6 +894,17 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + // check that bob is fully unlocked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), 0); + assert_eq!(bob_bal.amount_locked(), 0); + // now let's check some tx receipts let alice_address = key_to_stacks_addr(&alice); @@ -929,7 +947,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { "Bob tx0 should have committed okay" ); - assert_eq!(coinbase_txs.len(), 37); + assert_eq!(coinbase_txs.len(), 38); info!( "Expected first auto-unlock coinbase index: {}", @@ -1002,7 +1020,7 @@ fn delegate_stack_increase() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &format!("pox_3_delegate_stack_increase"), + function_name!(), 7103, Some(epochs.clone()), Some(&observer), @@ -1622,7 +1640,7 @@ fn stack_increase() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &format!("pox_3_stack_increase"), + function_name!(), 7105, Some(epochs.clone()), Some(&observer), @@ -2050,7 +2068,7 @@ fn pox_extend_transition() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - &format!("pox_3_pox_extend_transition"), + function_name!(), 7110, 
Some(epochs.clone()), Some(&observer), @@ -2563,7 +2581,7 @@ fn delegate_extend_pox_3() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_delegate_extend", + function_name!(), 7114, Some(epochs.clone()), Some(&observer), @@ -3049,7 +3067,7 @@ fn pox_3_getters() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_getters", + function_name!(), 7115, Some(epochs.clone()), Some(&observer), @@ -3384,8 +3402,8 @@ fn get_pox_addrs() { let (mut peer, keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_tests::get_pox_addrs", - 7102, + function_name!(), + 7142, Some(epochs.clone()), None, ); @@ -3596,7 +3614,7 @@ fn stack_with_segwit() { let (mut peer, keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3_tests::stack_with_segwit", + function_name!(), 7120, Some(epochs.clone()), None, @@ -3811,7 +3829,7 @@ fn stack_aggregation_increase() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3::stack_aggregation_increase", + function_name!(), 7117, Some(epochs.clone()), Some(&observer), @@ -4249,7 +4267,7 @@ fn pox_3_delegate_stx_addr_validation() { let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( &burnchain, - "pox_3::delegate_stx_addr", + function_name!(), 7100, Some(epochs.clone()), None, diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 7b83d48739..589fc398c6 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -652,8 +652,5 @@ macro_rules! impl_byte_array_rusqlite_only { macro_rules! 
function_name { () => { stdext::function_name!() - .rsplit_once("::") - .expect("Failed to split current function name") - .1 }; } From 0a5af8b1c8fefa13d1e1849b08d0224cdd56dad7 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 15 May 2023 11:16:34 -0500 Subject: [PATCH 139/158] more sanitization unit cases --- clarity/src/vm/types/serialization.rs | 146 ++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 5a087cd9ff..bd05f85b2f 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -1859,11 +1859,157 @@ mod tests { let t_4_bad_1 = TypeSignature::from_string("(list 5 (tuple (b (optional uint))))", version, epoch); + let v_5 = Value::okay( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + let v_5_good = Value::okay( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + + let t_5_good_0 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_5_good_1 = TypeSignature::from_string( + "(response (list 2 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_5_good_2 = TypeSignature::from_string( + "(response (list 2 (tuple (b (optional int)))) bool)", + version, + epoch, + ); + let t_5_bad_0 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)) (a (optional int)))) uint)", + version, + epoch, + ); + let t_5_bad_1 = 
TypeSignature::from_string( + "(response (list 5 (tuple (b (optional uint)))) int)", + version, + epoch, + ); + let t_5_bad_2 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_5_bad_3 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + + let v_6 = Value::error( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + let v_6_good = Value::error( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + + let t_6_good_0 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_good_1 = TypeSignature::from_string( + "(response int (list 2 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_good_2 = TypeSignature::from_string( + "(response bool (list 2 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_bad_0 = TypeSignature::from_string( + "(response uint (list 5 (tuple (b (optional int)) (a (optional int)))))", + version, + epoch, + ); + let t_6_bad_1 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional uint)))))", + version, + epoch, + ); + let t_6_bad_2 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_6_bad_3 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + let test_cases = [ (v_1, v_1_good, t_1_good, 
vec![t_1_bad_0, t_1_bad_1]), (v_2, v_2_good, t_2_good, vec![t_2_bad_0, t_2_bad_1]), (v_3, v_3_good, t_3_good, vec![t_3_bad_0, t_3_bad_1]), (v_4, v_4_good, t_4_good, vec![t_4_bad_0, t_4_bad_1]), + ( + v_5.clone(), + v_5_good.clone(), + t_5_good_0, + vec![t_5_bad_0, t_5_bad_1, t_5_bad_2, t_5_bad_3], + ), + (v_5.clone(), v_5_good.clone(), t_5_good_1, vec![]), + (v_5, v_5_good, t_5_good_2, vec![]), + ( + v_6.clone(), + v_6_good.clone(), + t_6_good_0, + vec![t_6_bad_0, t_6_bad_1, t_6_bad_2, t_6_bad_3], + ), + (v_6.clone(), v_6_good.clone(), t_6_good_1, vec![]), + (v_6, v_6_good, t_6_good_2, vec![]), ]; for (input_val, expected_out, good_type, bad_types) in test_cases.iter() { From 020133443ff4dd5624da549454aa921d578fb41f Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Mon, 15 May 2023 15:35:34 -0400 Subject: [PATCH 140/158] added more test coverage for epoch 2.4 --- clarity/src/vm/test_util/mod.rs | 15 ++++ clarity/src/vm/tests/traits.rs | 16 +++- src/clarity_vm/tests/ast.rs | 6 ++ src/clarity_vm/tests/costs.rs | 30 ++++--- src/clarity_vm/tests/large_contract.rs | 112 ++++++++++--------------- 5 files changed, 98 insertions(+), 81 deletions(-) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 65e0025c7a..efaffcea5e 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -39,6 +39,21 @@ pub const TEST_BURN_STATE_DB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { ast_rules: ASTRules::PrecheckSize, }; +pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnStateDB { + match epoch_id { + StacksEpochId::Epoch20 => UnitTestBurnStateDB { + epoch_id, + ast_rules: ASTRules::Typical, + }, + StacksEpochId::Epoch2_05 | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | + StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => UnitTestBurnStateDB { + epoch_id, + ast_rules: ASTRules::PrecheckSize + }, + _ => panic!("Epoch {} not covered", &epoch_id), + } +} + pub fn execute(s: &str) -> Value { 
vm_execute(s).unwrap().unwrap() } diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 0ee695c837..d54dad182f 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -41,8 +41,18 @@ use crate::vm::ContractContext; #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_epoch_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} +#[template] +#[rstest] +#[case(StacksEpochId::Epoch21)] +#[case(StacksEpochId::Epoch22)] +#[case(StacksEpochId::Epoch23)] +#[case(StacksEpochId::Epoch24)] +fn test_epoch_only_clarity_2(#[case] epoch: StacksEpochId) {} + #[apply(test_epoch_clarity_versions)] fn test_trait_basics(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let to_test = [ @@ -74,8 +84,8 @@ fn test_trait_basics(#[case] version: ClarityVersion, #[case] epoch: StacksEpoch } } -#[test] -fn test_clarity2() { +#[apply(test_epoch_only_clarity_2)] +fn test_clarity2(#[case] epoch: StacksEpochId) { let to_test = [ test_pass_principal_literal_to_trait, test_pass_trait_to_subtrait, @@ -90,7 +100,7 @@ fn test_clarity2() { test_let3_trait, ]; for test in to_test.iter() { - with_memory_environment(test, StacksEpochId::latest(), false); + with_memory_environment(test, epoch, false); } } diff --git a/src/clarity_vm/tests/ast.rs b/src/clarity_vm/tests/ast.rs index 139969080d..4367bf7045 100644 --- a/src/clarity_vm/tests/ast.rs +++ b/src/clarity_vm/tests/ast.rs @@ -19,6 +19,12 @@ use rstest_reuse::{self, *}; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] 
+#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn test_edge_counting_runtime_template( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/src/clarity_vm/tests/costs.rs b/src/clarity_vm/tests/costs.rs index 8e12c92cd1..5c2b22c388 100644 --- a/src/clarity_vm/tests/costs.rs +++ b/src/clarity_vm/tests/costs.rs @@ -1482,6 +1482,12 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi let bad_proposals = bad_cases.len(); + let voting_contract_to_use: &QualifiedContractIdentifier = if use_mainnet { + &COST_VOTING_MAINNET_CONTRACT + } else { + &COST_VOTING_TESTNET_CONTRACT + }; + { let mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); @@ -1489,7 +1495,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi db.begin(); db.set_variable_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposal-count", Value::UInt(bad_proposals as u128), ) @@ -1508,7 +1514,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ); let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix)), execute(&value), @@ -1546,7 +1552,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi for (target, referenced_function) in tracker.cost_function_references().into_iter() { assert_eq!( &referenced_function.contract_id, - &boot_code_id("costs", false), + &boot_code_id("costs", use_mainnet), "All cost functions should still point to the boot costs" ); assert_eq!( @@ -1568,7 +1574,7 @@ fn 
test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi "cost-definition", ), ( - boot_code_id("costs", false), + boot_code_id("costs", use_mainnet), "cost_le", cost_definer.clone(), "cost-definition-le", @@ -1589,7 +1595,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi let good_proposals = good_cases.len() as u128; db.set_variable_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposal-count", Value::UInt(bad_proposals as u128 + good_proposals), ) @@ -1608,7 +1614,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi ); let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix + bad_proposals)), execute(&value), @@ -1665,7 +1671,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } else { assert_eq!( &referenced_function.contract_id, - &boot_code_id("costs", false), + &boot_code_id("costs", use_mainnet), "Cost function should still point to the boot costs" ); assert_eq!( @@ -1679,11 +1685,11 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi }; } -// TODO: Reinstate this test. We couldn't get it working in time for pr/2940. 
-//#[test] -//fn test_cost_voting_integration_mainnet() { -// test_cost_voting_integration(true) -//} +#[test] +fn test_cost_voting_integration_mainnet() { + test_cost_voting_integration(true, ClarityVersion::Clarity1); + test_cost_voting_integration(true, ClarityVersion::Clarity2); +} #[test] fn test_cost_voting_integration_testnet() { diff --git a/src/clarity_vm/tests/large_contract.rs b/src/clarity_vm/tests/large_contract.rs index af944f4f69..789e0c0f74 100644 --- a/src/clarity_vm/tests/large_contract.rs +++ b/src/clarity_vm/tests/large_contract.rs @@ -61,6 +61,12 @@ use crate::util_lib::boot::boot_code_id; #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] +#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] +#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] fn clarity_version_template(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} fn test_block_headers(n: u8) -> StacksBlockId { @@ -116,12 +122,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac .unwrap(), ); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); - let burn_db = match epoch { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch), - }; + let burn_db = &generate_test_burn_state_db(epoch); let mut gb = clarity.begin_test_genesis_block( &StacksBlockId::sentinel(), @@ -137,45 +138,49 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac }) .unwrap(); - if epoch == 
StacksEpochId::Epoch2_05 { - let (ast, _analysis) = tx - .analyze_smart_contract( + match epoch { + StacksEpochId::Epoch2_05 => { + let (ast, _analysis) = tx + .analyze_smart_contract( + &boot_code_id("costs-2", false), + ClarityVersion::Clarity1, + BOOT_CODE_COSTS_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + tx.initialize_smart_contract( &boot_code_id("costs-2", false), ClarityVersion::Clarity1, + &ast, BOOT_CODE_COSTS_2, - ASTRules::PrecheckSize, + None, + |_, _| false, ) - .unwrap(); - tx.initialize_smart_contract( - &boot_code_id("costs-2", false), - ClarityVersion::Clarity1, - &ast, - BOOT_CODE_COSTS_2, - None, - |_, _| false, - ) - .unwrap(); - } - - if epoch == StacksEpochId::Epoch21 { - let (ast, _analysis) = tx - .analyze_smart_contract( + .unwrap(); + } + StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | + StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => { + let (ast, _analysis) = tx + .analyze_smart_contract( + &boot_code_id("costs-3", false), + ClarityVersion::Clarity2, + BOOT_CODE_COSTS_3, + ASTRules::PrecheckSize, + ) + .unwrap(); + tx.initialize_smart_contract( &boot_code_id("costs-3", false), ClarityVersion::Clarity2, + &ast, BOOT_CODE_COSTS_3, - ASTRules::PrecheckSize, + None, + |_, _| false, ) - .unwrap(); - tx.initialize_smart_contract( - &boot_code_id("costs-3", false), - ClarityVersion::Clarity2, - &ast, - BOOT_CODE_COSTS_3, - None, - |_, _| false, - ) - .unwrap(); + .unwrap(); + } + _ => panic!("Epoch {} not covered.", &epoch), } + }); gb.commit_block(); @@ -674,12 +679,7 @@ pub fn rollback_log_memory_test( let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let EXPLODE_N = 100; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let 
contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); clarity_instance @@ -748,12 +748,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let EXPLODE_N = 100; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); @@ -833,12 +828,7 @@ pub fn argument_memory_test( let EXPLODE_N = 100; let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); clarity_instance .begin_test_genesis_block( @@ -912,12 +902,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let COUNT_PER_FUNC = 10; let FUNCS = 10; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); @@ -1037,12 +1022,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let 
COUNT_PER_CONTRACT = 20; let CONTRACTS = 5; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); clarity_instance .begin_test_genesis_block( From 4c12f9b061eb9ef3cfd627309fee32f096d3133a Mon Sep 17 00:00:00 2001 From: friedger Date: Mon, 15 May 2023 22:11:26 +0200 Subject: [PATCH 141/158] fix: allow change of delegation data --- src/chainstate/stacks/boot/pox-3.clar | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index d1b2d80651..5b100dd249 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -653,9 +653,11 @@ (asserts! (check-caller-allowed) (err ERR_STACKING_PERMISSION_DENIED)) - ;; tx-sender principal must not be stacking - (asserts! (is-none (get-stacker-info tx-sender)) - (err ERR_STACKING_ALREADY_STACKED)) + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. 
;; pox-addr, if given, must be valid (match pox-addr From defcaa3101899bbdf08674c2e6da093365bc4816 Mon Sep 17 00:00:00 2001 From: friedger Date: Mon, 15 May 2023 23:14:19 +0200 Subject: [PATCH 142/158] chore: improve comments --- src/chainstate/stacks/boot/pox-3.clar | 2 ++ src/chainstate/stacks/boot/pox_3_tests.rs | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar index 5b100dd249..5878038a0b 100644 --- a/src/chainstate/stacks/boot/pox-3.clar +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -658,6 +658,8 @@ ;; delegate-stack-* functions assert that ;; 1. users can't swim in two pools at the same time. ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. ;; pox-addr, if given, must be valid (match pox-addr diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs index 2418b5bf64..adefc7ee01 100644 --- a/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -1829,7 +1829,7 @@ fn stack_increase() { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - // in the next tenure, PoX 2 should now exist. + // in the next tenure, PoX 3 should now exist. 
let tip = get_tip(peer.sortdb.as_ref()); // submit an increase: this should fail, because Alice is not yet locked @@ -2762,12 +2762,12 @@ fn delegate_extend_pox_3() { assert_eq!( alice_first_cycle as u64, first_v3_cycle, - "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + "Alice's first cycle in PoX-3 stacking state is the next cycle, which is 12" ); assert_eq!(alice_lock_period, 6); assert_eq!( bob_first_cycle as u64, first_v3_cycle, - "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + "Bob's first cycle in PoX-3 stacking state is the next cycle, which is 12" ); assert_eq!(bob_lock_period, 3); From 00e6c322191a99eb29e70329e9899e51c905ecd6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 17 May 2023 07:17:42 -0500 Subject: [PATCH 143/158] address PR feedback, enable sanitization in from-consensus-buff --- clarity/src/vm/functions/conversions.rs | 6 +++++- clarity/src/vm/types/serialization.rs | 2 ++ stacks-common/src/types/mod.rs | 10 +++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index 622baf6e87..594c8f6ec8 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -266,7 +266,11 @@ pub fn from_consensus_buff( // Perform the deserialization and check that it deserialized to the expected // type. A type mismatch at this point is an error that should be surfaced in // Clarity (as a none return). 
- let result = match Value::try_deserialize_bytes_exact(&input_bytes, &type_arg, false) { + let result = match Value::try_deserialize_bytes_exact( + &input_bytes, + &type_arg, + env.epoch().value_sanitizing(), + ) { Ok(value) => value, Err(_) => return Ok(Value::none()), }; diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index bd05f85b2f..7f790c8279 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -1098,6 +1098,8 @@ impl Value { Value::deserialize_read(&mut bytes.as_slice(), None, false) } + /// Try to deserialize a value from a hex string without type information. This *does not* + /// perform sanitization. pub fn try_deserialize_hex_untyped(hex: &str) -> Result { let hex = if hex.starts_with("0x") { &hex[2..] diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 3c934b1956..35bb97d860 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -85,7 +85,15 @@ impl StacksEpochId { /// Returns whether or not this Epoch should perform /// Clarity value sanitization pub fn value_sanitizing(&self) -> bool { - self >= &StacksEpochId::Epoch24 + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 => false, + StacksEpochId::Epoch24 => true, + } } } From 686668043756aae5f3e2cf8409dae6c708b8167b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 19 May 2023 09:00:30 -0500 Subject: [PATCH 144/158] address PR feedback --- clarity/src/vm/test_util/mod.rs | 9 ++++--- clarity/src/vm/types/serialization.rs | 37 +++++++++++++++++++++++--- src/clarity_vm/tests/costs.rs | 4 +-- src/clarity_vm/tests/large_contract.rs | 11 ++++---- 4 files changed, 48 insertions(+), 13 deletions(-) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index efaffcea5e..3af797e636 100644 --- 
a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -45,10 +45,13 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState epoch_id, ast_rules: ASTRules::Typical, }, - StacksEpochId::Epoch2_05 | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | - StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => UnitTestBurnStateDB { + StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => UnitTestBurnStateDB { epoch_id, - ast_rules: ASTRules::PrecheckSize + ast_rules: ASTRules::PrecheckSize, }, _ => panic!("Epoch {} not covered", &epoch_id), } diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 7f790c8279..0456d2033b 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -306,6 +306,10 @@ macro_rules! check_match { }; } +/// `DeserializeStackItem` objects are used by the deserializer to indicate +/// how the deserialization loop's current object is to be handled once it is +/// deserialized: i.e., is the object the top-level object for the serialization +/// or is it an entry in a composite type (e.g., a list or tuple)? enum DeserializeStackItem { List { items: Vec, @@ -336,6 +340,9 @@ enum DeserializeStackItem { impl DeserializeStackItem { /// What is the expected type for the child of this deserialization stack item? + /// + /// Returns `None` if this stack item either doesn't have an expected type, or the + /// next child is going to be sanitized/elided. fn next_expected_type(&self) -> Result, SerializationError> { match self { DeserializeStackItem::List { expected_type, .. } => Ok(expected_type @@ -772,6 +779,13 @@ impl Value { if len > 0 { let items = Vec::with_capacity(expected_len as usize); let first_key = ClarityName::deserialize_read(r)?; + // figure out if the next (key, value) pair for this + // tuple will be elided (or sanitized) from the tuple. 
+ // the logic here is that the next pair should be elided if: + // * `sanitize` parameter is true + // * `tuple_type` is some (i.e., there is an expected type for the + // tuple) + // * `tuple_type` does not contain an entry for `key` let next_sanitize = sanitize && tuple_type .map(|tt| tt.field_type(&first_key).is_none()) @@ -864,7 +878,11 @@ impl Value { stack_item } else { // this should be unreachable! - return Ok(item); + warn!( + "Deserializer reached unexpected path: item processed, but deserializer stack does not expect another value"; + "item" => %item, + ); + return Err("Deserializer processed item, but deserializer stack does not expect another value".into()); }; match stack_bottom { DeserializeStackItem::TopLevel { .. } => return Ok(item), @@ -948,6 +966,13 @@ impl Value { } else { // tuple is not finished, read the next key name and reinsert on stack let key = ClarityName::deserialize_read(r)?; + // figure out if the next (key, value) pair for this + // tuple will be elided (or sanitized) from the tuple. 
+ // the logic here is that the next pair should be elided if: + // * `sanitize` parameter is true + // * `tuple_type` is some (i.e., there is an expected type for the + // tuple) + // * `tuple_type` does not contain an entry for `key` let next_sanitize = sanitize && expected_type .as_ref() @@ -1201,7 +1226,7 @@ impl Value { _ => return None, }; let mut sanitized_tuple_entries = vec![]; - let tuple_data_len = tuple_data.len(); + let original_tuple_len = tuple_data.len(); let mut tuple_data_map = tuple_data.data_map; let mut did_sanitize_children = false; for (key, expect_key_type) in tt.get_type_map().iter() { @@ -1211,7 +1236,13 @@ impl Value { sanitized_tuple_entries.push((key.clone(), sanitized_field)); did_sanitize_children = did_sanitize_children || did_sanitize; } - let did_sanitize_tuple = did_sanitize_children || (tt.len() != tuple_data_len); + if sanitized_tuple_entries.len() as u64 != tt.len() { + // this code should be unreachable, because I think any case that + // could trigger this would have returned None earlier + warn!("Sanitizer handled path that should have errored earlier, skipping sanitization"); + return None; + } + let did_sanitize_tuple = did_sanitize_children || (tt.len() != original_tuple_len); ( Value::Tuple(TupleData::from_data(sanitized_tuple_entries).ok()?), did_sanitize_tuple, diff --git a/src/clarity_vm/tests/costs.rs b/src/clarity_vm/tests/costs.rs index 5c2b22c388..19b43ba1f3 100644 --- a/src/clarity_vm/tests/costs.rs +++ b/src/clarity_vm/tests/costs.rs @@ -1687,8 +1687,8 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi #[test] fn test_cost_voting_integration_mainnet() { - test_cost_voting_integration(true, ClarityVersion::Clarity1); - test_cost_voting_integration(true, ClarityVersion::Clarity2); + test_cost_voting_integration(true, ClarityVersion::Clarity1); + test_cost_voting_integration(true, ClarityVersion::Clarity2); } #[test] diff --git a/src/clarity_vm/tests/large_contract.rs 
b/src/clarity_vm/tests/large_contract.rs index 789e0c0f74..3212b74330 100644 --- a/src/clarity_vm/tests/large_contract.rs +++ b/src/clarity_vm/tests/large_contract.rs @@ -156,10 +156,12 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac None, |_, _| false, ) - .unwrap(); + .unwrap(); } - StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | - StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), @@ -176,11 +178,10 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac None, |_, _| false, ) - .unwrap(); + .unwrap(); } _ => panic!("Epoch {} not covered.", &epoch), } - }); gb.commit_block(); From 41da4a5b586e13fcf39f3a373c8f6b19eda87ddf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 19 May 2023 13:47:28 -0500 Subject: [PATCH 145/158] fix: affirmation calc should skip any ops whose parent is <= first_block_height --- src/burnchains/affirmation.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 2611d3b541..76b4f73dcb 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -573,11 +573,9 @@ pub fn read_prepare_phase_commits( test_debug!("Skip too-early block commit"); continue; } - if (opdata.parent_block_ptr as u64) < first_block_height { - if opdata.parent_block_ptr != 0 || opdata.parent_vtxindex != 0 { - test_debug!("Skip orphaned block-commit"); - continue; - } + if (opdata.parent_block_ptr as u64) <= first_block_height { + test_debug!("Skip orphaned block-commit"); + continue; } if opdata.block_height <= opdata.parent_block_ptr as u64 { test_debug!("Skip block-commit whose 'parent' comes at or after it"); From bfa03c7dba4b0a3abb0acb12a495ed5e504390e0 Mon Sep 17 00:00:00 2001 From: Aaron 
Blankstein Date: Fri, 19 May 2023 14:00:53 -0500 Subject: [PATCH 146/158] update mainnet 2.4 height with sip-024 value --- src/core/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index ccef9f56a7..8dc0aca1de 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -118,8 +118,8 @@ pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; /// This is Epoch-2.3 activation height proposed in SIP-023 pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; -/// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-022 -pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 789_751; +/// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-024 +pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; From db22876be4c77745255c854a0faaa187ee6f9a87 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 19 May 2023 14:16:52 -0500 Subject: [PATCH 147/158] add comment to affirmation module explaining change --- src/burnchains/affirmation.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index 76b4f73dcb..c2ca802b54 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -573,6 +573,8 @@ pub fn read_prepare_phase_commits( test_debug!("Skip too-early block commit"); continue; } + // the block commit's parent must be a burnchain block that is evaluated by the node + // blocks that are <= first_block_height do not meet this requirement. 
if (opdata.parent_block_ptr as u64) <= first_block_height { test_debug!("Skip orphaned block-commit"); continue; From c7b50e01ea9e0fe4558af066ced0e226ae6faf23 Mon Sep 17 00:00:00 2001 From: Pavitthra Pandurangan Date: Mon, 22 May 2023 10:40:13 -0400 Subject: [PATCH 148/158] add epoch.truncate --- testnet/stacks-node/src/tests/epoch_24.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 228556fb44..854031eebc 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -174,6 +174,7 @@ fn fix_to_pox_contract() { epochs[5].end_height = epoch_2_4; epochs[6].start_height = epoch_2_4; epochs[6].end_height = STACKS_EPOCH_MAX; + epochs.truncate(7); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -809,6 +810,7 @@ fn verify_auto_unlock_behavior() { epochs[5].end_height = epoch_2_4; epochs[6].start_height = epoch_2_4; epochs[6].end_height = STACKS_EPOCH_MAX; + epochs.truncate(7); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); From ff25ba6129d39710c6ff50fb3452b0cbac994f8a Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 22 May 2023 13:03:13 -0500 Subject: [PATCH 149/158] sanitize-gate the depth check --- clarity/src/vm/types/serialization.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 0456d2033b..6b8a107dba 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -72,6 +72,11 @@ const DESERIALIZATION_TYPE_CHECK_EPOCH: StacksEpochId = StacksEpochId::Epoch21; /// supported, so we increase the bound to a higher level limit imposed by the cost checker. 
const SANITIZATION_READ_BOUND: u64 = 15_000_000; +/// Before epoch-2.4, this is the deserialization depth limit. +/// After epoch-2.4, with type sanitization support, the full +/// clarity depth limit is supported. +const UNSANITIZED_DEPTH_CHECK: usize = 16; + impl std::fmt::Display for SerializationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { @@ -578,7 +583,12 @@ impl Value { }]; while !stack.is_empty() { - if stack.len() > MAX_TYPE_DEPTH as usize { + let depth_check = if sanitize { + MAX_TYPE_DEPTH as usize + } else { + UNSANITIZED_DEPTH_CHECK + }; + if stack.len() > depth_check { return Err(CheckErrors::TypeSignatureTooDeep.into()); } From cecc15c8d9d2dde36577a22c202c6318b8048edd Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 May 2023 15:43:52 -0700 Subject: [PATCH 150/158] Chore - adding 2.4.0.0.0 to changelog --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 368ff525c2..6f1e7ec9ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.4.0.0.0] +This is a **consensus-breaking** release to revert consensus to PoX, and is the second fork proposed in SIP-022. 
+ +- [SIP-022](https://github.com/stacksgov/sips/blob/main/sips/sip-022/sip-022-emergency-pox-fix.md) +- [SIP-024](https://github.com/stacksgov/sips/blob/main/sips/sip-024/sip-024-least-supertype-fix.md) + +### Fixed +- PoX is re-enabled and stacking resumes starting at Bitcoin block `791551` +- Peer network id is updated to `0x18000009` +- Refactoring of the deserialization routine from recursive to iterative + +This release is compatible with chainstate directories from 2.1.0.0.x and 2.3.0.0.x + ## [2.3.0.0.2] This is a high-priority hotfix release to address a bug in the From a93c2867df7bea946fcd99994bf73dbd54d6d0d3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 22 May 2023 16:18:17 -0700 Subject: [PATCH 151/158] chore: add sip-024 text --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f1e7ec9ae..5c9ad7b314 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ This is a **consensus-breaking** release to revert consensus to PoX, and is the ### Fixed - PoX is re-enabled and stacking resumes starting at Bitcoin block `791551` - Peer network id is updated to `0x18000009` -- Refactoring of the deserialization routine from recursive to iterative +- Adds the type sanitization described in SIP-024 This release is compatible with chainstate directories from 2.1.0.0.x and 2.3.0.0.x From 9c8d26a27949c0a95d4d726f4b3544cb9e3b8475 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 23 May 2023 15:03:06 -0500 Subject: [PATCH 152/158] test: refactor rstest templates for reuse --- Cargo.lock | 162 +++++++-- Cargo.toml | 4 +- clarity/Cargo.toml | 6 +- clarity/src/libclarity.rs | 2 +- .../vm/analysis/arithmetic_checker/tests.rs | 20 +- .../vm/analysis/read_only_checker/tests.rs | 20 +- .../src/vm/analysis/trait_checker/tests.rs | 92 +++-- .../type_checker/v2_1/tests/assets.rs | 16 +- .../type_checker/v2_1/tests/contracts.rs | 202 ++++++----- 
.../analysis/type_checker/v2_1/tests/mod.rs | 38 +-- clarity/src/vm/tests/assets.rs | 186 +++++----- clarity/src/vm/tests/contracts.rs | 252 +++++++------- clarity/src/vm/tests/defines.rs | 19 +- clarity/src/vm/tests/mod.rs | 124 +++++-- clarity/src/vm/tests/sequences.rs | 18 +- clarity/src/vm/tests/simple_apply_eval.rs | 32 +- clarity/src/vm/tests/traits.rs | 319 ++++++++++++------ clarity/src/vm/types/serialization.rs | 34 +- clarity/src/vm/types/signatures.rs | 23 +- src/clarity_vm/tests/analysis_costs.rs | 4 +- src/clarity_vm/tests/ast.rs | 20 +- src/clarity_vm/tests/contracts.rs | 4 +- src/clarity_vm/tests/forking.rs | 26 +- src/clarity_vm/tests/large_contract.rs | 33 +- 24 files changed, 872 insertions(+), 784 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df02fb1ea3..841302aee0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,7 +125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -398,8 +398,8 @@ dependencies = [ "rand_chacha 0.2.2", "regex", "ripemd", - "rstest", - "rstest_reuse", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", "secp256k1", "serde", @@ -523,8 +523,8 @@ dependencies = [ "rand 0.7.3", "rand_chacha 0.2.2", "regex", - "rstest", - "rstest_reuse", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -744,7 +744,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -794,7 +794,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 1.0.109", ] [[package]] @@ -811,7 +811,7 @@ checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -933,7 +933,7 @@ 
dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -994,11 +994,26 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "futures" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1006,15 +1021,26 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" + +[[package]] +name = "futures-executor" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] [[package]] name = "futures-io" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" @@ -1031,26 +1057,45 @@ dependencies = [ "waker-fn", ] +[[package]] 
+name = "futures-macro" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.12", +] + [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" + +[[package]] +name = "futures-timer" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ + "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -1796,7 +1841,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1887,7 +1932,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -2170,7 +2215,33 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "rstest" +version = "0.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "de1bb486a691878cd320c2f0d319ba91eeaa2e894066d8b5f8f117c000e9d962" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290ca1a1c8ca7edb7c3283bd44dc35dd54fdec6253a3912e201ba1072018fca8" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "rustc_version 0.4.0", + "syn 1.0.109", + "unicode-ident", ] [[package]] @@ -2181,7 +2252,19 @@ checksum = "32c6cfaae58c048728261723a72b80a0aa9f3768e9a7da3b302a24d262525219" dependencies = [ "quote", "rustc_version 0.3.3", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "rstest_reuse" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073" +dependencies = [ + "quote", + "rand 0.8.5", + "rustc_version 0.4.0", + "syn 1.0.109", ] [[package]] @@ -2418,7 +2501,7 @@ checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2670,8 +2753,8 @@ dependencies = [ "percent-encoding", "rand 0.7.3", "ripemd", - "rstest", - "rstest_reuse", + "rstest 0.11.0", + "rstest_reuse 0.1.3", "rusqlite", "secp256k1", "serde", @@ -2756,7 +2839,7 @@ dependencies = [ "quote", "serde", "serde_derive", - "syn", + "syn 1.0.109", ] [[package]] @@ -2772,7 +2855,7 @@ dependencies = [ "serde_derive", "serde_json", "sha1 0.6.1", - "syn", + "syn 1.0.109", ] [[package]] @@ -2806,6 +2889,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] 
name = "tempfile" version = "3.4.0" @@ -2865,7 +2959,7 @@ checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2953,7 +3047,7 @@ dependencies = [ "proc-macro2", "quote", "standback", - "syn", + "syn 1.0.109", ] [[package]] @@ -3317,7 +3411,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -3351,7 +3445,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/Cargo.toml b/Cargo.toml index 4af1002184..0b7ce13203 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,14 +91,14 @@ version = "0.2.23" features = ["std"] [dev-dependencies] -rstest = "0.11.0" -rstest_reuse = "0.1.3" assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" stx_genesis = { package = "stx-genesis", path = "./stx-genesis/."} clarity = { package = "clarity", features = ["default", "testing"], path = "./clarity/." } stacks_common = { package = "stacks-common", features = ["default", "testing"], path = "./stacks-common/." } +rstest = "0.17.0" +rstest_reuse = "0.5.0" [features] default = ["developer-mode"] diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index ce4fd7146e..b812ef735a 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -28,8 +28,8 @@ lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } stacks_common = { package = "stacks-common", path = "../stacks-common/." 
} -rstest = "0.11.0" -rstest_reuse = "0.1.3" +rstest = "0.17.0" +rstest_reuse = "0.5.0" [dependencies.serde_json] version = "1.0" @@ -44,8 +44,6 @@ version = "0.2.23" features = ["std"] [dev-dependencies] -rstest = "0.11.0" -rstest_reuse = "0.1.3" assert-json-diff = "1.0.0" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs index 5988a3818e..f2ac8ebb2b 100644 --- a/clarity/src/libclarity.rs +++ b/clarity/src/libclarity.rs @@ -49,7 +49,7 @@ extern crate rstest; #[cfg(any(test, feature = "testing"))] #[macro_use] -extern crate rstest_reuse; +pub extern crate rstest_reuse; #[cfg(feature = "monitoring_prom")] #[macro_use] diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 680d66cadf..d1a1de2453 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -30,26 +30,10 @@ use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::functions::define::DefineFunctions; use crate::vm::functions::NativeFunctions; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::variables::NativeVariables; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn 
test_clarity_versions_arith_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - /// Checks whether or not a contract only contains arithmetic expressions (for example, defining a /// map would not pass this check). /// This check is useful in determining the validity of new potential cost functions. @@ -77,7 +61,7 @@ fn check_good(contract: &str, version: ClarityVersion, epoch: StacksEpochId) { ArithmeticOnlyChecker::run(&analysis).expect("Should pass arithmetic checks"); } -#[apply(test_clarity_versions_arith_checker)] +#[apply(test_clarity_versions)] fn test_bad_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = [ ("(define-public (foo) (ok 1))", DefineTypeForbidden(DefineFunctions::PublicFunction)), diff --git a/clarity/src/vm/analysis/read_only_checker/tests.rs b/clarity/src/vm/analysis/read_only_checker/tests.rs index 188a58db89..ea9802da98 100644 --- a/clarity/src/vm/analysis/read_only_checker/tests.rs +++ b/clarity/src/vm/analysis/read_only_checker/tests.rs @@ -24,27 +24,11 @@ use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; use crate::vm::analysis::{CheckError, CheckErrors}; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::ClarityVersion; use stacks_common::types::StacksEpochId; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn 
test_clarity_versions_read_only_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - #[test] fn test_argument_count_violations() { let examples = [ @@ -203,7 +187,7 @@ fn test_nested_writing_closure() { } } -#[apply(test_clarity_versions_read_only_checker)] +#[apply(test_clarity_versions)] fn test_contract_call_read_only_violations( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index 7bca96fb9b..c2f1338616 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -25,28 +25,12 @@ use crate::vm::analysis::{type_check, CheckError}; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::{build_ast, parse}; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; use stacks_common::types::StacksEpochId; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_trait_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -94,7 +78,7 @@ fn test_dynamic_dispatch_by_defining_trait( .unwrap(); 
} -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_incomplete_impl_trait_1(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -120,7 +104,7 @@ fn test_incomplete_impl_trait_1(#[case] version: ClarityVersion, #[case] epoch: } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_incomplete_impl_trait_2(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -147,7 +131,7 @@ fn test_incomplete_impl_trait_2(#[case] version: ClarityVersion, #[case] epoch: } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_1(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 10 uint)) (response uint uint))))"; @@ -171,7 +155,7 @@ fn test_impl_trait_arg_admission_1(#[case] version: ClarityVersion, #[case] epoc } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_2(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 5 uint)) (response uint uint))))"; @@ -190,7 +174,7 @@ fn test_impl_trait_arg_admission_2(#[case] version: ClarityVersion, #[case] epoc .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_3(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 5 uint)) (response uint uint))))"; @@ -209,7 +193,7 @@ fn test_impl_trait_arg_admission_3(#[case] version: ClarityVersion, #[case] epoc .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] 
+#[apply(test_clarity_versions)] fn test_complete_impl_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -232,7 +216,7 @@ fn test_complete_impl_trait(#[case] version: ClarityVersion, #[case] epoch: Stac .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_complete_impl_trait_mixing_readonly( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -258,7 +242,7 @@ fn test_complete_impl_trait_mixing_readonly( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_get_trait_reference_from_tuple( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -311,7 +295,7 @@ fn test_get_trait_reference_from_tuple( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_and_impl_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -354,7 +338,7 @@ fn test_dynamic_dispatch_by_defining_and_impl_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_define_map_storing_trait_references( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -381,7 +365,7 @@ fn test_define_map_storing_trait_references( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_cycle_in_traits_1_contract(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let dispatching_contract_src = "(define-trait trait-1 ( (get-1 () (response uint uint)))) @@ -405,7 +389,7 @@ fn test_cycle_in_traits_1_contract(#[case] version: ClarityVersion, #[case] epoc } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_cycle_in_traits_2_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let dispatching_contract_src = "(use-trait trait-2 
.target-contract.trait-2) (define-trait trait-1 ( @@ -456,7 +440,7 @@ fn test_cycle_in_traits_2_contracts(#[case] version: ClarityVersion, #[case] epo } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_unknown_method( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -509,7 +493,7 @@ fn test_dynamic_dispatch_unknown_method( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_nested_literal_implicitly_compliant( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -577,7 +561,7 @@ fn test_nested_literal_implicitly_compliant( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_passing_trait_reference_instances( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -615,7 +599,7 @@ fn test_passing_trait_reference_instances( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_passing_nested_trait_reference_instances( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -654,7 +638,7 @@ fn test_passing_nested_trait_reference_instances( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -693,7 +677,7 @@ fn test_dynamic_dispatch_collision_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_defined_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -722,7 +706,7 @@ fn test_dynamic_dispatch_collision_defined_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_imported_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -762,7 +746,7 @@ fn test_dynamic_dispatch_collision_imported_trait( } } 
-#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_importing_non_existant_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -834,7 +818,7 @@ fn test_dynamic_dispatch_importing_non_existant_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_importing_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -901,7 +885,7 @@ fn test_dynamic_dispatch_importing_trait( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_nested_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1008,7 +992,7 @@ fn test_dynamic_dispatch_including_nested_trait( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_wrong_nested_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1127,7 +1111,7 @@ fn test_dynamic_dispatch_including_wrong_nested_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_args( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1181,7 +1165,7 @@ fn test_dynamic_dispatch_mismatched_args( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_returns( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1235,7 +1219,7 @@ fn test_dynamic_dispatch_mismatched_returns( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_bad_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1271,7 +1255,7 @@ fn test_bad_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: Stac } } 
-#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_good_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1304,7 +1288,7 @@ fn test_good_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: Sta .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_good_call_2_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1338,7 +1322,7 @@ fn test_good_call_2_with_trait(#[case] version: ClarityVersion, #[case] epoch: S .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1406,7 +1390,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1484,7 +1468,7 @@ fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_contract_of_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1505,7 +1489,7 @@ fn test_contract_of_good(#[case] version: ClarityVersion, #[case] epoch: StacksE .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_contract_of_wrong_type(#[case] version: ClarityVersion, #[case] epoch: 
StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1634,7 +1618,7 @@ fn test_contract_of_wrong_type(#[case] version: ClarityVersion, #[case] epoch: S } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1684,7 +1668,7 @@ fn test_return_trait_with_contract_of( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_begin( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1734,7 +1718,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_let( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1784,7 +1768,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_trait_contract_not_found(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let trait_contract_src = "(define-trait my-trait ((hello (int) (response uint uint))) diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index 0ee33344ea..6016b8e062 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -20,24 +20,12 @@ use rstest::rstest; #[cfg(test)] use rstest_reuse::{self, *}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, 
StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_assets(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - use crate::vm::analysis::errors::CheckErrors; use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; use crate::vm::analysis::AnalysisDatabase; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::{ QualifiedContractIdentifier, SequenceSubtype, StringSubtype, TypeSignature, }; @@ -124,7 +112,7 @@ const ASSET_NAMES: &str = "(define-constant burn-address 'SP00000000000000000000 (nft-burn? names name tx-sender)) "; -#[apply(test_clarity_versions_assets)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tokens_contract_id = QualifiedContractIdentifier::local("tokens").unwrap(); let names_contract_id = QualifiedContractIdentifier::local("names").unwrap(); diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index d3771c72ec..c4578db59b 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -27,6 +27,7 @@ use crate::vm::analysis::{mem_type_check as mem_run_analysis, run_analysis, Chec use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; use crate::vm::errors::Error; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{ PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TypeSignature, @@ -43,19 +44,6 @@ fn mem_type_check_v1(snippet: &str) -> CheckResult<(Option, 
Contr mem_run_analysis(snippet, ClarityVersion::Clarity1, StacksEpochId::latest()) } -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - #[template] #[rstest] #[case(ClarityVersion::Clarity1)] @@ -451,7 +439,7 @@ fn test_names_tokens_contracts_interface() { assert_json_eq!(test_contract_json, test_contract_json_expected); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tokens_contract_id = QualifiedContractIdentifier::local("tokens").unwrap(); let names_contract_id = QualifiedContractIdentifier::local("names").unwrap(); @@ -468,7 +456,7 @@ fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: S .unwrap(); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts_bad(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let broken_public = " (define-public (broken-cross-contract (name-hash (buff 20)) (name-price uint)) @@ -563,7 +551,7 @@ fn test_bad_map_usage() { }); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_same_function_name(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let ca_id = QualifiedContractIdentifier::local("contract-a").unwrap(); let cb_id = 
QualifiedContractIdentifier::local("contract-b").unwrap(); @@ -1778,7 +1766,7 @@ fn call_versioned( .map_err(|e| e.to_string()) } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); @@ -1793,7 +1781,7 @@ fn clarity_trait_experiments_impl(#[case] version: ClarityVersion, #[case] epoch }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); @@ -1808,7 +1796,7 @@ fn clarity_trait_experiments_use(#[case] version: ClarityVersion, #[case] epoch: }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_empty_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1824,7 +1812,7 @@ fn clarity_trait_experiments_empty_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_duplicate_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1843,7 +1831,7 @@ fn clarity_trait_experiments_duplicate_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_undefined( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1860,7 +1848,7 @@ fn clarity_trait_experiments_use_undefined( )); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_circular( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1878,7 +1866,7 @@ fn clarity_trait_experiments_circular( assert!(err.starts_with("ASTError(ParseError { err: CircularReference([\"circular\"])")); } -#[apply(test_clarity_versions_contracts)] 
+#[apply(test_clarity_versions)] fn clarity_trait_experiments_no_response( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1893,7 +1881,7 @@ fn clarity_trait_experiments_no_response( assert!(err.starts_with("DefineTraitBadSignature")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_out_of_order( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1909,7 +1897,7 @@ fn clarity_trait_experiments_out_of_order( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1927,7 +1915,7 @@ fn clarity_trait_experiments_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_both( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1948,7 +1936,7 @@ fn clarity_trait_experiments_impl_double_trait_both( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1971,7 +1959,7 @@ fn clarity_trait_experiments_impl_double_trait_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1992,7 +1980,7 @@ fn clarity_trait_experiments_impl_double_trait_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2016,7 +2004,7 @@ fn clarity_trait_experiments_use_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_partial_double_trait_1( #[case] version: ClarityVersion, #[case] 
epoch: StacksEpochId, @@ -2040,7 +2028,7 @@ fn clarity_trait_experiments_use_partial_double_trait_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_partial_double_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2062,7 +2050,7 @@ fn clarity_trait_experiments_use_partial_double_trait_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_identical_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2080,7 +2068,7 @@ fn clarity_trait_experiments_identical_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_identical_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2101,7 +2089,7 @@ fn clarity_trait_experiments_impl_identical_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_selfret_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2116,7 +2104,7 @@ fn clarity_trait_experiments_selfret_trait( assert!(err.starts_with("ASTError(ParseError { err: CircularReference([\"self-return\"])")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_math_trait_transitive_alias( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2136,7 +2124,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_alias( assert!(err.starts_with("TraitReferenceUnknown(\"math-alias\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_math_trait_transitive_name( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2159,7 +2147,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_name( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn 
clarity_trait_experiments_use_original_and_define_a_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2181,7 +2169,7 @@ fn clarity_trait_experiments_use_original_and_define_a_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_redefined_and_define_a_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2203,7 +2191,7 @@ fn clarity_trait_experiments_use_redefined_and_define_a_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_a_trait_transitive_original( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2222,7 +2210,7 @@ fn clarity_trait_experiments_use_a_trait_transitive_original( assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-it\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_a_trait_transitive_redefined( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2242,7 +2230,7 @@ fn clarity_trait_experiments_use_a_trait_transitive_redefined( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_nested_traits( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2264,7 +2252,7 @@ fn clarity_trait_experiments_nested_traits( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2297,7 +2285,7 @@ fn clarity_trait_experiments_call_nested_trait_1( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2323,7 +2311,7 @@ fn clarity_trait_experiments_call_nested_trait_2( }; } -#[apply(test_clarity_versions_contracts)] 
+#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_3_ok( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2349,7 +2337,7 @@ fn clarity_trait_experiments_call_nested_trait_3_ok( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_3_err( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2372,7 +2360,7 @@ fn clarity_trait_experiments_call_nested_trait_3_err( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_4( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2405,7 +2393,7 @@ fn clarity_trait_experiments_call_nested_trait_4( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_math_trait_incomplete( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2423,7 +2411,7 @@ fn clarity_trait_experiments_impl_math_trait_incomplete( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_literal( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2443,7 +2431,7 @@ fn clarity_trait_experiments_trait_literal( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_pass_let_rename_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2462,7 +2450,7 @@ fn clarity_trait_experiments_pass_let_rename_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_literal_incomplete( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2481,7 +2469,7 @@ fn clarity_trait_experiments_trait_literal_incomplete( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } 
-#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_let_rename_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2503,7 +2491,7 @@ fn clarity_trait_experiments_call_let_rename_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_data_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2522,7 +2510,7 @@ fn clarity_trait_experiments_trait_data_1( assert!(err.starts_with("ASTError(ParseError { err: TraitReferenceNotAllowed")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_data_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2541,7 +2529,7 @@ fn clarity_trait_experiments_trait_data_2( assert!(err.starts_with("ASTError(ParseError { err: TraitReferenceNotAllowed")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2557,14 +2545,14 @@ fn clarity_trait_experiments_upcast_trait_1( load_versioned(db, "upcast-trait-1", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2583,7 +2571,7 @@ fn clarity_trait_experiments_upcast_trait_2( assert!(err.starts_with("TypeError(TupleType(TupleTypeSignature { \"val\": principal,}), TupleType(TupleTypeSignature { \"val\": ,}))")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn 
clarity_trait_experiments_upcast_trait_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2599,14 +2587,14 @@ fn clarity_trait_experiments_upcast_trait_3( load_versioned(db, "upcast-trait-3", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_return_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2625,7 +2613,7 @@ fn clarity_trait_experiments_return_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_renamed( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2641,14 +2629,14 @@ fn clarity_trait_experiments_upcast_renamed( load_versioned(db, "upcast-renamed", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2671,7 +2659,7 @@ fn clarity_trait_experiments_constant_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_to_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2687,7 +2675,7 @@ fn clarity_trait_experiments_constant_to_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { 
assert!(err.starts_with("TypeError(TraitReferenceType")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2697,7 +2685,7 @@ fn clarity_trait_experiments_constant_to_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_to_constant_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2714,7 +2702,7 @@ fn clarity_trait_experiments_constant_to_constant_call( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2724,7 +2712,7 @@ fn clarity_trait_experiments_constant_to_constant_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_literal_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2741,7 +2729,7 @@ fn clarity_trait_experiments_downcast_literal_1( load_versioned(db, "downcast-literal-1", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { println!("err: {}", err); assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { @@ -2749,7 +2737,7 @@ fn clarity_trait_experiments_downcast_literal_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_literal_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2773,7 +2761,7 @@ fn clarity_trait_experiments_downcast_literal_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn 
clarity_trait_experiments_downcast_literal_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2792,7 +2780,7 @@ fn clarity_trait_experiments_downcast_literal_3( assert!(err.starts_with("TraitReferenceUnknown(\"p\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2809,14 +2797,14 @@ fn clarity_trait_experiments_downcast_trait_2( load_versioned(db, "downcast-trait-2", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2832,14 +2820,14 @@ fn clarity_trait_experiments_downcast_trait_3( load_versioned(db, "downcast-trait-3", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { 
assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_4( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2855,14 +2843,14 @@ fn clarity_trait_experiments_downcast_trait_4( load_versioned(db, "downcast-trait-4", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_5( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2878,14 +2866,14 @@ fn clarity_trait_experiments_downcast_trait_5( load_versioned(db, "downcast-trait-5", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { 
assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_identical_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2901,7 +2889,7 @@ fn clarity_trait_experiments_identical_trait_cast( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2911,7 +2899,7 @@ fn clarity_trait_experiments_identical_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2927,7 +2915,7 @@ fn clarity_trait_experiments_trait_cast( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2937,7 +2925,7 @@ fn clarity_trait_experiments_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_cast_incompatible( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2955,7 +2943,7 @@ fn clarity_trait_experiments_trait_cast_incompatible( .unwrap_err(); match version { ClarityVersion::Clarity1 => { - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { 
assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier")) @@ -2965,7 +2953,7 @@ fn clarity_trait_experiments_trait_cast_incompatible( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_renamed_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2984,7 +2972,7 @@ fn clarity_trait_experiments_renamed_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_use_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3003,7 +2991,7 @@ fn clarity_trait_experiments_readonly_use_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_pass_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3023,7 +3011,7 @@ fn clarity_trait_experiments_readonly_pass_trait( } // TODO: This should be allowed -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3042,7 +3030,7 @@ fn clarity_trait_experiments_readonly_call_trait( } // TODO: This should be allowed -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_static_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3062,7 +3050,7 @@ fn clarity_trait_experiments_readonly_static_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_static_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3081,7 +3069,7 @@ fn clarity_trait_experiments_readonly_static_call_trait( assert!(err.starts_with("WriteAttemptedInReadOnly")); } -#[apply(test_clarity_versions_contracts)] 
+#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3109,7 +3097,7 @@ fn clarity_trait_experiments_dyn_call_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_trait_partial( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3136,7 +3124,7 @@ fn clarity_trait_experiments_dyn_call_trait_partial( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_not_implemented( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3163,7 +3151,7 @@ fn clarity_trait_experiments_dyn_call_not_implemented( assert!(err.starts_with("BadTraitImplementation(\"math\", \"add\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_use_principal( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3182,7 +3170,7 @@ fn clarity_trait_experiments_call_use_principal( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_return_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3210,7 +3198,7 @@ fn clarity_trait_experiments_call_return_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_full_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3241,7 +3229,7 @@ fn clarity_trait_experiments_call_full_double_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_partial_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3272,7 +3260,7 @@ fn clarity_trait_experiments_call_partial_double_trait( }; } 
-#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_recursion( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3301,7 +3289,7 @@ fn clarity_trait_experiments_trait_recursion( } // Additional tests using this framework -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_principals_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3324,7 +3312,7 @@ fn clarity_trait_experiments_principals_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_traits_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3344,7 +3332,7 @@ fn clarity_trait_experiments_traits_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_mixed_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3360,7 +3348,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -3370,7 +3358,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method1_v1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3405,7 +3393,7 @@ fn clarity_trait_experiments_double_trait_method1_v1( assert!(err.starts_with("TypeError(BoolType, UIntType)")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method2_v1( #[case] version: 
ClarityVersion, #[case] epoch: StacksEpochId, @@ -3441,7 +3429,7 @@ fn clarity_trait_experiments_double_trait_method2_v1( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method1_v1_v2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3476,7 +3464,7 @@ fn clarity_trait_experiments_double_trait_method1_v1_v2( assert!(err.starts_with("TypeError(BoolType, UIntType)")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method2_v1_v2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3512,7 +3500,7 @@ fn clarity_trait_experiments_double_trait_method2_v1_v2( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_cross_epochs( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index d867528912..e2c3af5cea 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -49,27 +49,11 @@ use super::CheckResult; use crate::vm::ClarityVersion; use crate::vm::analysis::type_checker::SequenceSubtype; +use crate::vm::tests::test_clarity_versions; mod assets; pub mod contracts; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn 
test_clarity_versions_type_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - /// Backwards-compatibility shim for type_checker tests. Runs at latest Clarity version. pub fn mem_type_check(exp: &str) -> CheckResult<(Option, ContractAnalysis)> { mem_run_analysis( @@ -349,7 +333,7 @@ fn test_get_burn_block_info() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_define_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-trait trait-1 ((get-1 (uint) (response uint uint))))", @@ -393,7 +377,7 @@ fn test_define_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpoch } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_use_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = [ "(use-trait trait-1 ((get-1 (uint) (response uint uint))))", @@ -415,7 +399,7 @@ fn test_use_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = ["(impl-trait trait-1)", "(impl-trait)"]; let bad_expected = [ @@ -539,7 +523,7 @@ fn test_tx_sponsor() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_destructuring_opts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(unwrap! 
(some 1) 2)", @@ -753,7 +737,7 @@ fn test_at_block() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_trait_reference_unknown(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = [( "(+ 1 )", @@ -1157,7 +1141,7 @@ fn test_element_at() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_eqs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(is-eq (list 1 2 3 4 5) (list 1 2 3 4 5 6 7))", @@ -2202,7 +2186,7 @@ fn test_string_to_ints() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_response_inference(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-private (foo (x int)) (err x)) @@ -2332,7 +2316,7 @@ fn test_factorial() { mem_type_check(contract).unwrap(); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_options(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract = " (define-private (foo (id (optional int))) @@ -3551,7 +3535,7 @@ fn test_let_bind_trait() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_trait_same_contract(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = ["(define-trait trait-foo ((foo () (response uint uint)))) (define-public (call-foo (f )) @@ -3597,7 +3581,7 @@ fn test_tuple_arg() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_list_arg(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-private (foo (l (list 3 int))) diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs index ec13d20a37..dd1c1bb890 100644 --- a/clarity/src/vm/tests/assets.rs +++ b/clarity/src/vm/tests/assets.rs @@ -21,8 +21,10 @@ use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use 
crate::vm::events::StacksTransactionEvent; use crate::vm::execute as vm_execute; use crate::vm::representations::SymbolicExpression; +use crate::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use crate::vm::tests::{ - execute, is_committed, is_err_code, symbols_from_values, with_memory_environment, + test_clarity_versions, test_epochs, tl_env_factory as env_factory, + TopLevelMemoryEnvironmentGenerator, }; use crate::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, @@ -137,7 +139,9 @@ fn execute_transaction( env.execute_transaction(issuer, None, contract_identifier.clone(), tx, args) } -fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_native_stx_ops(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = env_factory.get_env(epoch); let contract = r#"(define-public (burn-stx (amount uint) (p principal)) (stx-burn? amount p)) (define-public (xfer-stx (amount uint) (p principal) (t principal)) (stx-transfer? 
amount p t)) (define-read-only (balance-stx (p principal)) (stx-get-balance p)) @@ -204,7 +208,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 1: send 0 let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id, "xfer-stx", @@ -216,7 +220,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id, "burn-stx", @@ -230,7 +234,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 2: from = to let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -244,7 +248,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 3: sender is not tx-sender let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -256,7 +260,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -270,7 +274,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 4: amount > balance let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -282,7 +286,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -298,7 +302,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // will overflow before such an overflowing transfer is allowed. 
// assert_eq!( // execute_transaction( - // owned_env, + // &mut owned_env, // p2.clone(), // &token_contract_id, // "xfer-stx", @@ -311,7 +315,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 6: check balance let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -329,7 +333,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { let nonexistent_principal = Value::Principal(PrincipalData::Standard(sp_data)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -342,7 +346,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, let's actually do a couple transfers/burns and check the asset maps. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -358,7 +362,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -374,7 +378,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "xfer-stx", @@ -392,7 +396,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // let's try a user -> contract transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "to-contract", @@ -416,7 +420,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { let contract_principal = Value::Principal(cp_data); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -429,7 +433,7 @@ fn 
test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now let's do a contract -> user transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "from-contract", @@ -456,7 +460,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, to transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &second_contract_id, "send-to-other", @@ -477,7 +481,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, let's send some back let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "from-contract", @@ -496,7 +500,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // and, one more time for good measure let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &second_contract_id, "send-to-other", @@ -515,7 +519,12 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); } -fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_simple_token_system( + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); @@ -556,7 +565,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -568,7 +577,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ 
-584,7 +593,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -596,7 +605,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -608,7 +617,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let err = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -622,7 +631,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { }); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -634,7 +643,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -646,7 +655,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -663,7 +672,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -679,7 +688,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, 
p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -695,7 +704,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -707,7 +716,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get the total supply - Total minted so far = 10204 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "get-total-supply", @@ -718,7 +727,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Burn 100 tokens from p2's balance (out of 9200) let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -735,7 +744,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get p2's balance we should get 9200 - 100 = 9100 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -747,7 +756,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get the new total supply let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "get-total-supply", @@ -758,7 +767,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Burn 9101 tokens from p2's balance (out of 9100) - Should fail with error code 1 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -771,7 +780,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Try to burn 0 tokens from p2's balance - Should fail with error code 1 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut 
owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -785,7 +794,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Try to burn 1 tokens from p2's balance (out of 9100) - Should pass even though // sender != tx sender let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "burn", @@ -801,7 +810,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "mint-after", @@ -813,7 +822,9 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); } -fn test_total_supply(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_total_supply(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = env_factory.get_env(epoch); let bad_0 = "(define-fungible-token stackaroos (- 5))"; let bad_1 = "(define-fungible-token stackaroos true)"; @@ -878,7 +889,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -888,7 +899,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -898,7 +909,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(!is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -908,7 +919,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let err = 
execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -922,7 +933,12 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { }); } -fn test_overlapping_nfts(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_overlapping_nfts( + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let names_contract = ASSET_NAMES; @@ -966,7 +982,13 @@ fn test_overlapping_nfts(owned_env: &mut OwnedEnvironment) { .unwrap(); } -fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { +#[apply(test_clarity_versions)] +fn test_simple_naming_system( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let names_contract = ASSET_NAMES; @@ -989,10 +1011,8 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { _ => panic!(), }; - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); let tokens_contract_id = QualifiedContractIdentifier::new(p1_std_principal_data.clone(), "tokens".into()); @@ -1034,7 +1054,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1045,7 +1065,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_err_code(&result, 1)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "preorder", @@ -1056,7 +1076,7 @@ fn 
test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "preorder", @@ -1069,7 +1089,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // shouldn't be able to register a name you didn't preorder! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1082,7 +1102,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // should work! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "register", @@ -1104,7 +1124,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's try some token-transfers let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "try-bad-transfers", @@ -1115,7 +1135,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "try-bad-transfers-but-ok", @@ -1134,7 +1154,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's mint some names let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1146,7 +1166,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1160,7 +1180,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's transfer name let (result, asset_map, _events) 
= execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1172,7 +1192,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1184,7 +1204,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "transfer", @@ -1196,7 +1216,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1219,7 +1239,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // try to underpay! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1230,7 +1250,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1243,7 +1263,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // register a cheap name! 
let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1254,7 +1274,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1265,7 +1285,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1278,7 +1298,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 burning 5 should fail (not owner anymore). let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-burn", @@ -1291,7 +1311,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 minting 8 should succeed let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1304,7 +1324,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 burning 8 (which belongs to p1) should succeed even though sender != tx_sender. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1322,7 +1342,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 burning 5 should succeed. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1340,7 +1360,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 re-burning 5 should succeed. 
let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1352,7 +1372,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 re-minting 5 should succeed let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1372,17 +1392,3 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { ); } } - -#[test] -fn test_all() { - let to_test = [ - test_overlapping_nfts, - test_simple_token_system, - test_simple_naming_system, - test_total_supply, - test_native_stx_ops, - ]; - for test in to_test.iter() { - with_memory_environment(test, StacksEpochId::latest(), true); - } -} diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 688403dca8..b0b2db0c2c 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -19,8 +19,6 @@ use crate::types::chainstate::StacksBlockId; #[cfg(any(test, feature = "testing"))] use rstest::rstest; -#[cfg(any(test, feature = "testing"))] -use rstest_reuse::{self, *}; use stacks_common::types::StacksEpochId; use crate::vm::ast; @@ -34,9 +32,11 @@ use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute as vm_execute; use crate::vm::representations::SymbolicExpression; use crate::vm::tests::{ - execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, - with_memory_environment, BurnStateDB, TEST_BURN_STATE_DB, TEST_HEADER_DB, + env_factory, execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, + tl_env_factory, BurnStateDB, MemoryEnvironmentGenerator, TopLevelMemoryEnvironmentGenerator, + TEST_BURN_STATE_DB, TEST_HEADER_DB, }; +use crate::vm::tests::{test_clarity_versions, test_epochs}; use crate::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, 
TypeSignature, Value, @@ -175,11 +175,13 @@ fn test_get_block_info_eval() { } } -fn test_block_headers(n: u8) -> StacksBlockId { - StacksBlockId([n as u8; 32]) -} - -fn test_contract_caller(owned_env: &mut OwnedEnvironment) { +#[apply(test_clarity_versions)] +fn test_contract_caller( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_a = "(define-read-only (get-caller) (list contract-caller tx-sender))"; let contract_b = "(define-read-only (get-caller) @@ -312,7 +314,14 @@ fn tx_sponsor_contract_asserts(env: &mut Environment, sponsor: Option assert_eq!( - x, - RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() - )) - ), - _ => panic!("Unexpected error"), - } + let err = owned_env + .initialize_contract( + QualifiedContractIdentifier::local("contract").unwrap(), + &contract, + None, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + eprintln!("{}", err); + match err { + Error::Runtime(x, _) => assert_eq!( + x, + RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( + vec![2 as u8; 32].as_slice() + )) + ), + _ => panic!("Unexpected error"), } - - with_memory_environment(test, StacksEpochId::latest(), true); } -#[test] -fn test_as_max_len() { - fn test(owned_env: &mut OwnedEnvironment) { - let contract = "(define-data-var token-ids (list 10 uint) (list)) +#[apply(test_epochs)] +fn test_as_max_len(epoch: StacksEpochId, mut tl_env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = tl_env_factory.get_env(epoch); + let contract = "(define-data-var token-ids (list 10 uint) (list)) (var-set token-ids (unwrap! (as-max-len? 
(append (var-get token-ids) u1) u10) (err 10)))"; - owned_env - .initialize_contract( - QualifiedContractIdentifier::local("contract").unwrap(), - &contract, - None, - ASTRules::PrecheckSize, - ) - .unwrap(); - } - - with_memory_environment(test, StacksEpochId::latest(), true); + owned_env + .initialize_contract( + QualifiedContractIdentifier::local("contract").unwrap(), + &contract, + None, + ASTRules::PrecheckSize, + ) + .unwrap(); } #[test] @@ -1080,8 +1121,13 @@ fn test_arg_stack_depth() { ); } -#[test] -fn test_cc_stack_depth() { +#[apply(test_clarity_versions)] +fn test_cc_stack_depth( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_one = "(define-public (foo) (ok (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ @@ -1096,33 +1142,30 @@ fn test_cc_stack_depth() { 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1)) (bar) "; + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); + env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + .unwrap(); - with_memory_environment( - |owned_env| { - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - - let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) - .unwrap(); - - let contract_identifier = 
QualifiedContractIdentifier::local("c-bar").unwrap(); - assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) - .unwrap_err(), - RuntimeErrorType::MaxStackDepthReached.into() - ); - }, - StacksEpochId::latest(), - false, + let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); + assert_eq!( + env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + .unwrap_err(), + RuntimeErrorType::MaxStackDepthReached.into() ); } -#[test] -fn test_cc_trait_stack_depth() { +#[apply(test_clarity_versions)] +fn test_cc_trait_stack_depth( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); + let contract_one = "(define-public (foo) (ok (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ @@ -1140,43 +1183,18 @@ fn test_cc_trait_stack_depth() { (bar .c-foo) "; - with_memory_environment( - |owned_env| { - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - - let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) - .unwrap(); - - let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); - assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) - .unwrap_err(), - RuntimeErrorType::MaxStackDepthReached.into() - ); - }, - StacksEpochId::latest(), - false, - ); -} + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + let mut env = owned_env.get_exec_environment(None, None, 
&mut placeholder_context); -#[test] -fn test_all() { - let to_test = [ - test_factorial_contract, - test_aborts, - test_contract_caller, - test_tx_sponsor, - test_fully_qualified_contract_call, - test_simple_naming_system, - test_simple_contract_call, - ]; - for test in to_test.iter() { - eprintln!(".."); - with_memory_environment(test, StacksEpochId::latest(), false); - } + let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); + env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + .unwrap(); + + let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); + assert_eq!( + env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + .unwrap_err(), + RuntimeErrorType::MaxStackDepthReached.into() + ); } diff --git a/clarity/src/vm/tests/defines.rs b/clarity/src/vm/tests/defines.rs index 6a73e375be..235aa425f5 100644 --- a/clarity/src/vm/tests/defines.rs +++ b/clarity/src/vm/tests/defines.rs @@ -14,24 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::vm::tests::test_clarity_versions; + #[cfg(test)] use rstest::rstest; #[cfg(test)] use rstest_reuse::{self, *}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - use crate::vm::ast::build_ast; use crate::vm::ast::errors::{ParseError, ParseErrors}; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; @@ -66,7 +55,7 @@ fn test_defines() { assert_eq!(Ok(Some(Value::Int(1))), execute(&tests)); } -#[apply(test_clarity_versions_defines)] +#[apply(test_clarity_versions)] fn test_accept_options(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let defun = "(define-private (f (b (optional int))) (* 10 (default-to 0 b)))"; let tests = [ @@ -199,7 +188,7 @@ fn test_stack_depth() { }) } -#[apply(test_clarity_versions_defines)] +#[apply(test_clarity_versions)] fn test_recursive_panic(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = "(define-private (factorial (a int)) (if (is-eq a 0) diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 7d7fb79634..66817a6cd7 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -53,22 +53,89 @@ mod datamaps; mod defines; mod principals; mod sequences; +#[cfg(test)] mod simple_apply_eval; mod traits; -pub fn with_memory_environment(f: F, epoch: StacksEpochId, top_level: bool) -where - F: FnOnce(&mut OwnedEnvironment) -> (), -{ - let mut 
marf_kv = MemoryBackingStore::new(); +macro_rules! epochs_template { + ($($epoch:ident,)*) => { + #[template] + #[export] + #[rstest] + $( + #[case::$epoch(StacksEpochId::$epoch)] + )* + pub fn test_epochs(#[case] epoch: StacksEpochId) {} - let mut owned_env = OwnedEnvironment::new(marf_kv.as_clarity_db(), epoch); - // start an initial transaction. - if !top_level { - owned_env.begin(); + #[test] + fn epochs_covered() { + let epoch = StacksEpochId::latest(); + match epoch { + // don't test Epoch-1.0 + StacksEpochId::Epoch10 => (), + // this will lead to a compile time failure if an epoch is left out + // of the epochs_template! macro list + $(StacksEpochId::$epoch)|* => (), + } + } + } +} + +macro_rules! clarity_template { + ($(($epoch:ident, $clarity:ident),)*) => { + #[template] + #[export] + #[rstest] + $( + #[case::$epoch(ClarityVersion::$clarity, StacksEpochId::$epoch)] + )* + pub fn test_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} + + #[test] + fn epoch_clarity_pairs_covered() { + let epoch = StacksEpochId::latest(); + let clarity = ClarityVersion::latest(); + match (epoch, clarity) { + // don't test Epoch-1.0 + (StacksEpochId::Epoch10, _) => (), + // don't test these pairs, because they aren't supported: + (StacksEpochId::Epoch20, ClarityVersion::Clarity2) => (), + (StacksEpochId::Epoch2_05, ClarityVersion::Clarity2) => (), + // this will lead to a compile time failure if a pair is left out + // of the clarity_template! macro list + $((StacksEpochId::$epoch, ClarityVersion::$clarity))|* => (), + } + } } +} + +// Define two rstest templates for Clarity tests: `test_epochs` and `test_clarity_versions` +// these templates test all epochs (except 1.0) and all valid epoch/clarity-version pairs. +// +// The macro definitions ensure that we get compile time errors in testing if there is a +// non-covered case in the rstest template. 
This *could* have been written as a derive macro, +// but then it would need to be defined in the `stacks-common` library (where it would have to +// get a `testing` feature flag). This seems less obtuse. +epochs_template! { + Epoch20, + Epoch2_05, + Epoch21, + Epoch22, + Epoch23, + Epoch24, +} - f(&mut owned_env) +clarity_template! { + (Epoch20, Clarity1), + (Epoch2_05, Clarity1), + (Epoch21, Clarity1), + (Epoch21, Clarity2), + (Epoch22, Clarity1), + (Epoch22, Clarity2), + (Epoch23, Clarity1), + (Epoch23, Clarity2), + (Epoch24, Clarity1), + (Epoch24, Clarity2), } #[cfg(test)] @@ -78,23 +145,32 @@ impl Value { } } -pub fn with_versioned_memory_environment( - f: F, - epoch: StacksEpochId, - version: ClarityVersion, - top_level: bool, -) where - F: FnOnce(&mut OwnedEnvironment, ClarityVersion) -> (), -{ - let mut marf_kv = MemoryBackingStore::new(); - - let mut owned_env = OwnedEnvironment::new(marf_kv.as_clarity_db(), epoch); - // start an initial transaction. - if !top_level { +#[fixture] +pub fn env_factory() -> MemoryEnvironmentGenerator { + MemoryEnvironmentGenerator(MemoryBackingStore::new()) +} + +#[fixture] +pub fn tl_env_factory() -> TopLevelMemoryEnvironmentGenerator { + TopLevelMemoryEnvironmentGenerator(MemoryBackingStore::new()) +} + +pub struct MemoryEnvironmentGenerator(MemoryBackingStore); +impl MemoryEnvironmentGenerator { + fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { + let mut owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + // start an initial transaction. 
owned_env.begin(); + owned_env } +} - f(&mut owned_env, version) +pub struct TopLevelMemoryEnvironmentGenerator(MemoryBackingStore); +impl TopLevelMemoryEnvironmentGenerator { + fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { + let owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + owned_env + } } /// Determine whether or not to use the testnet or mainnet chain ID, given whether or not the diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index 3b9e9b6c3c..dcdf841b65 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -17,13 +17,12 @@ use crate::vm::types::signatures::{ListTypeData, SequenceSubtype}; use crate::vm::types::TypeSignature::{BoolType, IntType, SequenceType, UIntType}; use crate::vm::types::{StringSubtype, StringUTF8Length, TypeSignature, Value}; -#[cfg(test)] use rstest::rstest; -#[cfg(test)] use rstest_reuse::{self, *}; use crate::vm::analysis::errors::CheckError; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::SequenceSubtype::{BufferType, ListType, StringType}; use crate::vm::types::signatures::StringSubtype::ASCII; use crate::vm::types::BufferLength; @@ -32,19 +31,6 @@ use crate::vm::{execute, execute_v2, ClarityVersion}; use stacks_common::types::StacksEpochId; use std::convert::{TryFrom, TryInto}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn 
test_clarity_versions_sequences(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - #[test] fn test_simple_list_admission() { let defines = "(define-private (square (x int)) (* x x)) @@ -1172,7 +1158,7 @@ fn test_buff_len() { assert_eq!(expected, execute(test2).unwrap().unwrap()); } -#[apply(test_clarity_versions_sequences)] +#[apply(test_clarity_versions)] fn test_construct_bad_list(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let test1 = "(list 1 2 3 true)"; assert_eq!( diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index dd5fee4fcb..83f4c53271 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -16,21 +16,21 @@ use std::collections::HashMap; -#[cfg(test)] use rstest::rstest; -#[cfg(test)] use rstest_reuse::{self, *}; -#[cfg(test)] use crate::vm::ast::parse; use crate::vm::ast::ASTRules; use crate::vm::callables::DefinedFunction; use crate::vm::contexts::OwnedEnvironment; use crate::vm::costs::LimitedCostTracker; +use crate::vm::database::MemoryBackingStore; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType, ShortReturnType}; use crate::vm::tests::execute; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::*; +use crate::vm::types::StacksAddressExtensions; use crate::vm::types::{ASCIIData, BuffData, CharType, QualifiedContractIdentifier, TypeSignature}; use crate::vm::types::{PrincipalData, ResponseData, SequenceData, SequenceSubtype, StringSubtype}; use crate::vm::ClarityVersion; @@ -49,26 +49,6 @@ use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, to_hex}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, 
StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_simple_apply_eval( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -use crate::vm::database::MemoryBackingStore; -use crate::vm::types::StacksAddressExtensions; - #[test] fn test_doubly_defined_persisted_vars() { let tests = [ @@ -84,7 +64,7 @@ fn test_doubly_defined_persisted_vars() { } } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { /* test program: @@ -654,7 +634,7 @@ fn test_principal_equality() { .for_each(|(program, expectation)| assert_eq!(expectation.clone(), execute(program))); } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_if_functions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // // test program: @@ -1098,7 +1078,7 @@ fn test_sequence_comparisons_mismatched_types() { }); } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_arithmetic_errors(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = [ "(>= 1)", diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index d54dad182f..cb515b7563 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -19,95 +19,30 @@ use stacks_common::types::StacksEpochId; use crate::vm::analysis::errors::CheckError; use crate::vm::ast::ASTRules; use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; +use crate::vm::database::MemoryBackingStore; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute 
as vm_execute; -use crate::vm::tests::{ - execute, symbols_from_values, with_memory_environment, with_versioned_memory_environment, -}; +use crate::vm::tests::{execute, symbols_from_values}; use crate::vm::types::{ PrincipalData, QualifiedContractIdentifier, ResponseData, TypeSignature, Value, }; use std::convert::TryInto; +use crate::vm::tests::env_factory; +use crate::vm::tests::test_clarity_versions; +use crate::vm::tests::test_epochs; use crate::vm::version::ClarityVersion; use crate::vm::ContractContext; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_epoch_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - -#[template] -#[rstest] -#[case(StacksEpochId::Epoch21)] -#[case(StacksEpochId::Epoch22)] -#[case(StacksEpochId::Epoch23)] -#[case(StacksEpochId::Epoch24)] -fn test_epoch_only_clarity_2(#[case] epoch: StacksEpochId) {} - -#[apply(test_epoch_clarity_versions)] -fn test_trait_basics(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { - let to_test = [ - test_dynamic_dispatch_pass_trait_nested_in_let, - test_dynamic_dispatch_pass_trait, - test_dynamic_dispatch_intra_contract_call, - test_dynamic_dispatch_by_defining_trait, - test_dynamic_dispatch_by_implementing_imported_trait, - test_dynamic_dispatch_by_importing_trait, - test_dynamic_dispatch_including_nested_trait, - test_dynamic_dispatch_mismatched_args, - test_dynamic_dispatch_mismatched_returned, - test_reentrant_dynamic_dispatch, - 
test_readwrite_dynamic_dispatch, - test_readwrite_violation_dynamic_dispatch, - test_bad_call_with_trait, - test_good_call_with_trait, - test_good_call_2_with_trait, - test_contract_of_value, - test_contract_of_no_impl, - test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs, - test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions, - test_return_trait_with_contract_of, - test_return_trait_with_contract_of_wrapped_in_begin, - test_return_trait_with_contract_of_wrapped_in_let, - ]; - for test in to_test.iter() { - with_versioned_memory_environment(test, epoch, version, false); - } -} - -#[apply(test_epoch_only_clarity_2)] -fn test_clarity2(#[case] epoch: StacksEpochId) { - let to_test = [ - test_pass_principal_literal_to_trait, - test_pass_trait_to_subtrait, - test_embedded_trait, - test_pass_embedded_trait_to_subtrait_optional, - test_pass_embedded_trait_to_subtrait_ok, - test_pass_embedded_trait_to_subtrait_err, - test_pass_embedded_trait_to_subtrait_list, - test_pass_embedded_trait_to_subtrait_list_option, - test_pass_embedded_trait_to_subtrait_option_list, - test_let_trait, - test_let3_trait, - ]; - for test in to_test.iter() { - with_memory_environment(test, epoch, false); - } -} +use super::MemoryEnvironmentGenerator; +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -156,10 +91,13 @@ fn test_dynamic_dispatch_by_defining_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_trait_nested_in_let( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = 
env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -211,7 +149,13 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( } } -fn test_dynamic_dispatch_pass_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_dynamic_dispatch_pass_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -262,10 +206,13 @@ fn test_dynamic_dispatch_pass_trait(owned_env: &mut OwnedEnvironment, version: C } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_intra_contract_call( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -317,10 +264,13 @@ fn test_dynamic_dispatch_intra_contract_call( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_implementing_imported_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -377,10 +327,13 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( - owned_env: &mut OwnedEnvironment, version: 
ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) (get-2 (uint) (response uint uint))))"; @@ -439,10 +392,13 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_importing_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -498,10 +454,13 @@ fn test_dynamic_dispatch_by_importing_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_nested_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_nested_trait = "(define-trait trait-a ( (get-a (uint) (response uint uint))))"; let contract_defining_trait = "(use-trait trait-a .contract-defining-nested-trait.trait-a) @@ -579,10 +538,13 @@ fn test_dynamic_dispatch_including_nested_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_args( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -633,10 +595,13 @@ fn test_dynamic_dispatch_mismatched_args( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_returned( - owned_env: &mut OwnedEnvironment, version: 
ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -687,7 +652,13 @@ fn test_dynamic_dispatch_mismatched_returned( } } -fn test_reentrant_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_reentrant_dynamic_dispatch( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -741,7 +712,13 @@ fn test_reentrant_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: Cl } } -fn test_readwrite_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_readwrite_dynamic_dispatch( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-read-only (wrapped-get-1 (contract )) @@ -792,10 +769,13 @@ fn test_readwrite_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: Cl } } +#[apply(test_clarity_versions)] fn test_readwrite_violation_dynamic_dispatch( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-read-only (wrapped-get-1 (contract )) @@ -846,7 +826,13 @@ fn test_readwrite_violation_dynamic_dispatch( } } -fn test_bad_call_with_trait(owned_env: &mut 
OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_bad_call_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); // This set of contracts should be working in this context, // the analysis is not being performed. let contract_defining_trait = "(define-trait trait-1 ( @@ -911,7 +897,13 @@ fn test_bad_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVe } } -fn test_good_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_good_call_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -973,7 +965,13 @@ fn test_good_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityV } } -fn test_good_call_2_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_good_call_2_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1040,10 +1038,13 @@ fn test_good_call_2_with_trait(owned_env: &mut OwnedEnvironment, version: Clarit } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let 
contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -1101,7 +1102,13 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio } } -fn test_contract_of_value(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_contract_of_value( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1160,7 +1167,13 @@ fn test_contract_of_value(owned_env: &mut OwnedEnvironment, version: ClarityVers } } -fn test_contract_of_no_impl(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_contract_of_no_impl( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1221,10 +1234,13 @@ fn test_contract_of_no_impl(owned_env: &mut OwnedEnvironment, version: ClarityVe } } +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_begin( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1275,10 +1291,13 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( } } +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_let( 
- owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1329,7 +1348,13 @@ fn test_return_trait_with_contract_of_wrapped_in_let( } } -fn test_return_trait_with_contract_of(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_return_trait_with_contract_of( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1378,7 +1403,12 @@ fn test_return_trait_with_contract_of(owned_env: &mut OwnedEnvironment, version: } } -fn test_pass_trait_to_subtrait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1438,7 +1468,16 @@ fn test_pass_trait_to_subtrait(owned_env: &mut OwnedEnvironment) { } } -fn test_embedded_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_clarity_versions)] +fn test_embedded_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ -1496,7 +1535,15 @@ fn test_embedded_trait(owned_env: &mut OwnedEnvironment) { } } -fn 
test_pass_embedded_trait_to_subtrait_optional(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_optional( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1560,7 +1607,15 @@ fn test_pass_embedded_trait_to_subtrait_optional(owned_env: &mut OwnedEnvironmen } } -fn test_pass_embedded_trait_to_subtrait_ok(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_ok( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1624,7 +1679,15 @@ fn test_pass_embedded_trait_to_subtrait_ok(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_err(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_err( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1688,7 +1751,15 @@ fn test_pass_embedded_trait_to_subtrait_err(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_list(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_list( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) 
)) @@ -1752,7 +1823,15 @@ fn test_pass_embedded_trait_to_subtrait_list(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_list_option(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_list_option( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1819,7 +1898,15 @@ fn test_pass_embedded_trait_to_subtrait_list_option(owned_env: &mut OwnedEnviron } } -fn test_pass_embedded_trait_to_subtrait_option_list(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_option_list( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1886,7 +1973,12 @@ fn test_pass_embedded_trait_to_subtrait_option_list(owned_env: &mut OwnedEnviron } } -fn test_let_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ -1942,7 +2034,12 @@ fn test_let_trait(owned_env: &mut OwnedEnvironment) { } } -fn test_let3_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ 
-2002,7 +2099,15 @@ fn test_let3_trait(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_principal_literal_to_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_principal_literal_to_trait( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 6b8a107dba..3d2b71c2e4 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -1353,7 +1353,7 @@ impl std::hash::Hash for Value { } #[cfg(test)] -mod tests { +pub mod tests { use rstest::rstest; use rstest_reuse::{self, *}; @@ -1361,6 +1361,7 @@ mod tests { use crate::vm::database::{ClarityDeserializable, ClaritySerializable, RollbackWrapper}; use crate::vm::errors::Error; + use crate::vm::tests::test_clarity_versions; use crate::vm::types::TypeSignature::{BoolType, IntType}; use super::super::*; @@ -1368,23 +1369,6 @@ mod tests { use crate::vm::ClarityVersion; use stacks_common::types::StacksEpochId; - #[template] - #[rstest] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] - fn test_clarity_versions_serialization( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, - ) { - } - fn buff_type(size: u32) -> TypeSignature { 
TypeSignature::SequenceType(SequenceSubtype::BufferType(size.try_into().unwrap())).into() } @@ -1431,7 +1415,7 @@ mod tests { test_deser_u32_helper(134217728); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_lists(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let list_list_int = Value::list_from(vec![Value::list_from(vec![ Value::Int(1), @@ -1558,7 +1542,7 @@ mod tests { test_bad_expectation(Value::UInt(1), TypeSignature::IntType); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_opts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::none()); test_deser_ser(Value::some(Value::Int(15)).unwrap()); @@ -1572,7 +1556,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_resp(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::okay(Value::Int(15)).unwrap()); test_deser_ser(Value::error(Value::Int(15)).unwrap()); @@ -1589,7 +1573,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_buffs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::buff_from(vec![0, 0, 0, 0]).unwrap()); test_deser_ser(Value::buff_from(vec![0xde, 0xad, 0xbe, 0xef]).unwrap()); @@ -1607,7 +1591,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_string_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::string_ascii_from_bytes(vec![61, 62, 63, 64]).unwrap()); @@ -1618,7 +1602,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_string_utf8(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::string_utf8_from_bytes(vec![61, 62, 63, 64]).unwrap()); test_deser_ser( @@ -1729,7 +1713,7 @@ mod 
tests { }); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_sanitization(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let v_1 = Value::list_from(vec![ TupleData::from_data(vec![("b".into(), Value::Int(2))]) diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 47cc94eb2c..a8052ab305 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -1941,22 +1941,7 @@ mod test { #[cfg(test)] use rstest_reuse::{self, *}; - #[template] - #[rstest] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] - fn test_clarity_versions_signatures( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, - ) { - } + use crate::vm::tests::test_clarity_versions; fn fail_parse(val: &str, version: ClarityVersion, epoch: StacksEpochId) -> CheckErrors { use crate::vm::ast::parse; @@ -1970,14 +1955,14 @@ mod test { TypeSignature::parse_type_repr(epoch, expr, &mut ()).unwrap_err() } - #[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn type_of_list_of_buffs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let value = execute("(list \"abc\" \"abcde\")").unwrap().unwrap(); let type_descr = TypeSignature::from_string("(list 2 (string-ascii 5))", version, epoch); assert_eq!(TypeSignature::type_of(&value), type_descr); } - #[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn 
type_signature_way_too_big(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // first_tuple.type_size ~= 131 // second_tuple.type_size = k * (130+130) @@ -1998,7 +1983,7 @@ mod test { ); } - #[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn test_construction(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad_type_descriptions = [ ("(tuple)", EmptyTuplesNotAllowed), diff --git a/src/clarity_vm/tests/analysis_costs.rs b/src/clarity_vm/tests/analysis_costs.rs index 8b6c1b501b..8483e3ba4b 100644 --- a/src/clarity_vm/tests/analysis_costs.rs +++ b/src/clarity_vm/tests/analysis_costs.rs @@ -30,9 +30,7 @@ use clarity::vm::execute as vm_execute; use clarity::vm::functions::NativeFunctions; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; -use clarity::vm::tests::{ - execute, symbols_from_values, with_memory_environment, UnitTestBurnStateDB, -}; +use clarity::vm::tests::{execute, symbols_from_values, UnitTestBurnStateDB}; use clarity::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, }; diff --git a/src/clarity_vm/tests/ast.rs b/src/clarity_vm/tests/ast.rs index 4367bf7045..bc1cff12a5 100644 --- a/src/clarity_vm/tests/ast.rs +++ b/src/clarity_vm/tests/ast.rs @@ -5,6 +5,7 @@ use clarity::vm::types::QualifiedContractIdentifier; use stacks_common::types::chainstate::StacksBlockId; use crate::chainstate::stacks::index::ClarityMarfTrieId; +use clarity::vm::tests::test_clarity_versions; use clarity::vm::version::ClarityVersion; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; @@ -14,23 +15,6 @@ use rstest::rstest; #[cfg(test)] use rstest_reuse::{self, *}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] 
-#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_edge_counting_runtime_template( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - fn dependency_edge_counting_runtime( iters: usize, version: ClarityVersion, @@ -78,7 +62,7 @@ fn dependency_edge_counting_runtime( cost_track.get_total().runtime } -#[apply(test_edge_counting_runtime_template)] +#[apply(test_clarity_versions)] fn test_edge_counting_runtime(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let ratio_4_8 = dependency_edge_counting_runtime(8, version, epoch) / dependency_edge_counting_runtime(4, version, epoch); diff --git a/src/clarity_vm/tests/contracts.rs b/src/clarity_vm/tests/contracts.rs index 293ddb32d8..7b8659ba14 100644 --- a/src/clarity_vm/tests/contracts.rs +++ b/src/clarity_vm/tests/contracts.rs @@ -38,8 +38,8 @@ use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use clarity::vm::execute as vm_execute; use clarity::vm::representations::SymbolicExpression; use clarity::vm::tests::{ - execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, - with_memory_environment, BurnStateDB, TEST_BURN_STATE_DB, TEST_HEADER_DB, + execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, BurnStateDB, + TEST_BURN_STATE_DB, TEST_HEADER_DB, }; use clarity::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, diff --git a/src/clarity_vm/tests/forking.rs b/src/clarity_vm/tests/forking.rs index 029e5654f6..3c18ebb1ef 100644 --- a/src/clarity_vm/tests/forking.rs +++ b/src/clarity_vm/tests/forking.rs @@ -34,27 +34,11 @@ use 
stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::types::chainstate::StacksBlockId; use crate::clarity_vm::database::marf::MarfedKV; +use clarity::vm::tests::test_clarity_versions; const p1_str: &str = "'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn test_clarity_versions_type_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_forking_simple(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { with_separate_forks_environment( version, @@ -72,7 +56,7 @@ fn test_forking_simple(#[case] version: ClarityVersion, #[case] epoch: StacksEpo ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // test how at-block works when a mutation has occurred fn initialize(owned_env: &mut OwnedEnvironment) { @@ -149,7 +133,7 @@ fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: Stack ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { fn initialize(owned_env: &mut OwnedEnvironment) { let c = QualifiedContractIdentifier::local("contract").unwrap(); @@ -228,7 +212,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, 
#[case] epoch: StacksEpoc ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { fn initialize_1(owned_env: &mut OwnedEnvironment) { let c_a = QualifiedContractIdentifier::local("contract-a").unwrap(); diff --git a/src/clarity_vm/tests/large_contract.rs b/src/clarity_vm/tests/large_contract.rs index 3212b74330..71503d7881 100644 --- a/src/clarity_vm/tests/large_contract.rs +++ b/src/clarity_vm/tests/large_contract.rs @@ -46,8 +46,6 @@ use crate::clarity_vm::database::MemoryBackingStore; use clarity::vm::clarity::ClarityConnection; use clarity::vm::clarity::TransactionConnection; -use crate::vm::tests::with_memory_environment; - use clarity::vm::version::ClarityVersion; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; @@ -55,19 +53,7 @@ use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::boot::{BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3}; use crate::util_lib::boot::boot_code_id; - -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch22)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch23)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch24)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch24)] -fn clarity_version_template(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} +use clarity::vm::tests::test_clarity_versions; fn test_block_headers(n: u8) -> StacksBlockId { StacksBlockId([n as u8; 32]) @@ -110,8 +96,11 @@ const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance (token-credit! 
'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G u200) (token-credit! .tokens u4))"; -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { + if epoch < StacksEpochId::Epoch2_05 { + return; + } let mut clarity = ClarityInstance::new(false, CHAIN_ID_TESTNET, MarfedKV::temporary()); let p1 = PrincipalData::from( PrincipalData::parse_standard_principal("SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR") @@ -438,7 +427,7 @@ where f(&mut owned_env, version) } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] fn test_simple_naming_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { with_versioned_memory_environment(inner_test_simple_naming_system, version, false); } @@ -672,7 +661,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl * `(define-data-var var-x ...)` uses more than 1048576 bytes of memory. * this is mainly due to using hex encoding in the sqlite storage. 
*/ -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn rollback_log_memory_test( #[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId, @@ -744,7 +733,7 @@ pub fn rollback_log_memory_test( } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); @@ -819,7 +808,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn argument_memory_test( #[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId, @@ -897,7 +886,7 @@ pub fn argument_memory_test( } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); @@ -1017,7 +1006,7 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); From bfae0835bea5a8a7dca83dc1229c94dffc24fbcb Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 24 May 2023 09:35:25 -0500 Subject: [PATCH 153/158] chore: use explicit tmp path for testing marf. 
fixes fopen issues running multithreaded in some platforms --- src/clarity_vm/database/marf.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/clarity_vm/database/marf.rs b/src/clarity_vm/database/marf.rs index f9c7884138..a1bb96cb9f 100644 --- a/src/clarity_vm/database/marf.rs +++ b/src/clarity_vm/database/marf.rs @@ -1,4 +1,5 @@ use std::path::PathBuf; +use std::str::FromStr; use rusqlite::Connection; @@ -114,7 +115,7 @@ impl MarfedKV { use stacks_common::util::hash::to_hex; use std::env; - let mut path = env::temp_dir(); + let mut path = PathBuf::from_str("/tmp/stacks-node-tests/unit-tests-marf").unwrap(); let random_bytes = rand::thread_rng().gen::<[u8; 32]>(); path.push(to_hex(&random_bytes)); From 8a7a37122705f00cabab70b669ac89ffa90b1d0a Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 6 Jun 2023 13:43:41 -0700 Subject: [PATCH 154/158] Update github action CI workflow (#3199) * Updating Github Action - build all release images from binaries - create checksum of binary artifacts for release - adds clippy and crate advisory actions - update all dockerfiles to use ramdisk for building - separate actions to relevant files - adds arm64 binaries/docker image - update all debian builds to use bullseye (latest debian) * only run btc int tests on default branch * enable btc int test on develop * Feat/update ci (#38) * Updating Github Action - build all release images from binaries - create checksum of binary artifacts for release - adds clippy and crate advisory actions - update all dockerfiles to use ramdisk for building - separate actions to relevant files - adds arm64 binaries/docker image - update all debian builds to use bullseye (latest debian) * only run btc int tests on default branch * final action test disabled a lot of the long-running tests * Update ci.yml * Update ci.yml * Update bitcoin-tests.yml * Update bitcoin-tests.yml * run on push to master (merge) * Update Github Actions - build all 
release images from binaries - create checksum of binary artifacts for release - adds clippy and crate advisory actions - update all dockerfiles to use ramdisk for building - separate actions to relevant files - adds arm64 binaries/docker image - update all debian builds to use bullseye (latest debian) * adding build features to dockerfiles * update repo org to stacks-network missed a ref to wileyj forked repo * addressing comments in pr 3199 see https://github.com/stacks-network/stacks-blockchain/pull/3199 for changes requested * cleaning up docker tags prevent overwriting of docker image branch tags * disabling audit workflow disabling this workflow until we can test further * Adding a release file * Update to trigger logic Updating the logic of how/when builds and releases happen based on comments in PR. Updated the RELEASING.md file to reflect these changes * chore: delete circle.yml CircleCI hasn't been used in 11+ months. Fixes #3072 * switch repo&owner to var remove hardcoded value in favor or `GITHUB_REPOSITORY` * use local workflows * fix: don't assume that the bitcoin node always gives a non-zero number of headers * fix: add unit test and change log entry * fix: Exclude benchmarks from compilation * fix: Resolve conflicts and the remaining two errors * clean: remove benchmark files * fix: use explicit version number * minor update to reconcile diffs since some files were renamed, some minor changes had to be made manually here, i.e. 
fail_ci_if_error: false * hardcode some vals for testing * revert * use org/repo locations for jobs testing the locally built packages/docker images is successful, this reverts those changes need for testing the resulting artifacts * Moving file to docs dir * continue build if unit-tests fail use current settings from master branch * 3199 - minor updates for recent upstream changes --------- Co-authored-by: Diwaker Gupta <15990+diwakergupta@users.noreply.github.com> Co-authored-by: Jude Nelson Co-authored-by: Stjepan Golemac --- .../dockerfiles/Dockerfile.alpine-binary | 23 + .../dockerfiles/Dockerfile.debian-binary | 23 + .../dockerfiles/Dockerfile.debian-source | 24 + .github/workflows/audit.yml | 35 ++ .github/workflows/bitcoin-tests.yml | 61 ++- .github/workflows/build-source-binary.yml | 65 +++ .github/workflows/ci.yml | 472 ++++++------------ .github/workflows/clarity-js-sdk-pr.yml | 16 +- .github/workflows/clippy.yml | 44 ++ .github/workflows/docker-platforms.yml | 125 ----- .github/workflows/docs-pr.yml | 34 +- .github/workflows/github-release.yml | 58 +++ .../workflows/image-build-alpine-binary.yml | 81 +++ .../workflows/image-build-debian-binary.yml | 92 ++++ .../workflows/image-build-debian-source.yml | 90 ++++ .github/workflows/stacks-blockchain-tests.yml | 117 +++++ build-scripts/Dockerfile.linux-arm64 | 23 - build-scripts/Dockerfile.linux-armv7 | 23 - build-scripts/Dockerfile.linux-glibc-arm64 | 26 + build-scripts/Dockerfile.linux-glibc-x64 | 23 + build-scripts/Dockerfile.linux-musl-arm64 | 21 + build-scripts/Dockerfile.linux-musl-x64 | 24 +- build-scripts/Dockerfile.linux-x64 | 20 - build-scripts/Dockerfile.macos-arm64 | 27 +- build-scripts/Dockerfile.macos-x64 | 27 +- build-scripts/Dockerfile.windows-x64 | 17 +- build-scripts/build-dist.sh | 20 +- docs/ci-release.md | 150 ++++++ 28 files changed, 1173 insertions(+), 588 deletions(-) create mode 100644 .github/actions/dockerfiles/Dockerfile.alpine-binary create mode 100644 
.github/actions/dockerfiles/Dockerfile.debian-binary create mode 100644 .github/actions/dockerfiles/Dockerfile.debian-source create mode 100644 .github/workflows/audit.yml create mode 100644 .github/workflows/build-source-binary.yml create mode 100644 .github/workflows/clippy.yml delete mode 100644 .github/workflows/docker-platforms.yml create mode 100644 .github/workflows/github-release.yml create mode 100644 .github/workflows/image-build-alpine-binary.yml create mode 100644 .github/workflows/image-build-debian-binary.yml create mode 100644 .github/workflows/image-build-debian-source.yml create mode 100644 .github/workflows/stacks-blockchain-tests.yml delete mode 100644 build-scripts/Dockerfile.linux-arm64 delete mode 100644 build-scripts/Dockerfile.linux-armv7 create mode 100644 build-scripts/Dockerfile.linux-glibc-arm64 create mode 100644 build-scripts/Dockerfile.linux-glibc-x64 create mode 100644 build-scripts/Dockerfile.linux-musl-arm64 delete mode 100644 build-scripts/Dockerfile.linux-x64 create mode 100644 docs/ci-release.md diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary new file mode 100644 index 0000000000..8c450a67f3 --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -0,0 +1,23 @@ +FROM --platform=${TARGETPLATFORM} alpine as builder +# Use a small image to download and extract the release archive + +ARG TAG +ARG BIN_ARCH +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETARCH +ARG TARGETVARIANT +ARG REPO=stacks-network/stacks-blockchain + +RUN case ${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + "*") exit 1 ;; \ + esac \ + && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ + && unzip ${BIN_ARCH}.zip -d /out + +FROM --platform=${TARGETPLATFORM} 
alpine +COPY --from=builder /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary new file mode 100644 index 0000000000..cf1380361b --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -0,0 +1,23 @@ +FROM --platform=${TARGETPLATFORM} alpine as builder +# Use a small image to download and extract the release archive + +ARG TAG +ARG BIN_ARCH +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETARCH +ARG TARGETVARIANT +ARG REPO=stacks-network/stacks-blockchain + +RUN case ${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + "*") exit 1 ;; \ + esac \ + && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ + && unzip ${BIN_ARCH}.zip -d /out + +FROM --platform=${TARGETPLATFORM} debian:bullseye +COPY --from=builder /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source new file mode 100644 index 0000000000..bbae34c2d5 --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.debian-source @@ -0,0 +1,24 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. 
${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out + +FROM --platform=${TARGETPLATFORM} debian:bullseye +COPY --from=build /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml new file mode 100644 index 0000000000..c3864d5659 --- /dev/null +++ b/.github/workflows/audit.yml @@ -0,0 +1,35 @@ +## +## Performs an audit for crate advisories against cargo dependencies +## + +name: Security Audit + +# Only run when: +# - workflow is manually triggered +# - Cargo.toml/lock is changed +# - Daily at 0330 UTC +# Note: this will create issues for any crate advisories unless they already exist + +on: + workflow_dispatch: + push: + paths: + - "**/Cargo.toml" + - "**/Cargo.lock" + schedule: + - cron: 30 03 * * * + +jobs: + security_audit: + if: ${{ false }} + name: Crate Vulnerability Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Rust Dependency Check + id: rust_dep_check + uses: actions-rs/audit-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a6fc20a3a2..fee0d91639 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -1,9 +1,17 @@ -name: stacks-bitcoin-integration-tests +## +## Bitcoin Integration Tests +## + +name: Bitcoin Integration Tests # Only run when: -# - PRs are opened +# - PRs are (re)opened against master branch + on: pull_request: + types: + - opened + - reopened concurrency: group: stacks-bitcoin-integration-tests-${{ github.ref }} @@ -11,26 +19,35 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: + # Create bitcoin image used for later tests 
build-integration-image: + name: Build Image runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Build bitcoin integration testing image + id: build_docker_image env: DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info + # Remove .dockerignore file so codecov has access to git info and build the image run: | rm .dockerignore - docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests -t stacks-node:integrations . + docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests -t stacks-blockchain:integrations . - name: Export docker image as tarball - run: docker save -o integration-image.tar stacks-node:integrations + id: export_docker_image + run: docker save -o integration-image.tar stacks-blockchain:integrations - name: Upload built docker image - uses: actions/upload-artifact@v2 + id: upload_docker_image + uses: actions/upload-artifact@v3 with: name: integration-image.tar path: integration-image.tar + # Run integration tests using sampled genesis block sampled-genesis: + name: Sampled Genesis runs-on: ubuntu-latest needs: - build-integration-image @@ -106,26 +123,35 @@ jobs: - tests::epoch_24::fix_to_pox_contract - tests::epoch_24::verify_auto_unlock_behavior steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Download docker image - uses: actions/download-artifact@v2 + id: download_docker_image + uses: actions/download-artifact@v3 with: name: integration-image.tar - name: Load docker image + id: load_docker_image run: docker load -i integration-image.tar && rm integration-image.tar - name: All integration tests with sampled genesis + id: bitcoin_integration_tests timeout-minutes: 30 env: DOCKER_BUILDKIT: 1 TEST_NAME: ${{ matrix.test-name }} run: docker build -o coverage-output --build-arg test_name=${{ 
matrix.test-name }} -f ./.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests . - - uses: codecov/codecov-action@v2 + - name: Code Coverage + id: code_coverage + uses: codecov/codecov-action@v3 with: files: ./coverage-output/lcov.info name: ${{ matrix.test-name }} fail_ci_if_error: false + + # Run atlas integration tests atlas-test: - if: ${{ true }} + name: Atlas Test runs-on: ubuntu-latest needs: - build-integration-image @@ -136,20 +162,27 @@ jobs: - tests::neon_integrations::atlas_integration_test - tests::neon_integrations::atlas_stress_integration_test steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Download docker image - uses: actions/download-artifact@v2 + id: download_docker_image + uses: actions/download-artifact@v3 with: name: integration-image.tar - name: Load docker image + id: load_docker_image run: docker load -i integration-image.tar && rm integration-image.tar - name: Atlas integration tests + id: atlas_integration_tests timeout-minutes: 40 env: DOCKER_BUILDKIT: 1 TEST_NAME: ${{ matrix.test-name }} run: docker build -o coverage-output --build-arg test_name=${{ matrix.test-name }} -f ./.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests . 
- - uses: codecov/codecov-action@v2 + - name: Code Coverage + id: code_coverage + uses: codecov/codecov-action@v3 with: files: ./coverage-output/lcov.info name: ${{ matrix.test-name }} diff --git a/.github/workflows/build-source-binary.yml b/.github/workflows/build-source-binary.yml new file mode 100644 index 0000000000..284171d672 --- /dev/null +++ b/.github/workflows/build-source-binary.yml @@ -0,0 +1,65 @@ +## +## Builds binary assets of stacks-blockchain and creates a named tag github (draft) release +## + +name: Build Distributable Assets + +# Only run when: +# - manually triggered via the ci.yml workflow with a provided input tag + +on: + workflow_call: + inputs: + tag: + description: "Tag name of this release (x.y.z)" + required: true + type: string + parallel_jobs: + description: "Number of parallel binary builds" + required: false + type: number + default: 4 + arch: + description: "Stringified JSON object listing of platform matrix" + required: true + type: string + +jobs: + artifact: + if: ${{ inputs.tag != '' }} + name: Create Artifacts + runs-on: ubuntu-latest + strategy: + max-parallel: ${{ inputs.parallel_jobs }} + matrix: + platform: ${{ fromJson(inputs.arch) }} + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up Docker Buildx + id: setup_buildx + uses: docker/setup-buildx-action@v2 + - name: Build Binaries + id: build_binaries + uses: docker/build-push-action@v3 + with: + file: build-scripts/Dockerfile.${{ matrix.platform }} + outputs: type=local,dest=./release/${{ matrix.platform }} + build-args: | + OS_ARCH=${{ matrix.platform }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + - name: Compress artifact + id: compress_artifact + run: zip --junk-paths ${{ matrix.platform }} ./release/${{ 
matrix.platform }}/* + - name: Upload artifact + id: upload_artifact + uses: actions/upload-artifact@v3 + with: + path: ${{ matrix.platform }}.zip diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0ec84fe2e0..5b72c9faf3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,10 +1,14 @@ name: CI -# Only run when: -# - PRs are opened against the master branch -# - the workflow is started from the UI (an optional tag can be passed in via parameter) -# - If the optional tag parameter is passed in, a new tag will be generated based off the selected branch +## Only run when: +## - manually triggered +## - PR's are (re)opened +## - push to master (i.e. merge develop -> master) + on: + push: + branches: + - master pull_request: workflow_dispatch: inputs: @@ -13,320 +17,164 @@ on: required: false concurrency: - group: stacks-blockchain-${{ github.ref }} - # Only cancel in progress if this is for a PR - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true jobs: - # Run full genesis test - full-genesis: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Single full genesis integration test - env: - DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info - run: | - rm .dockerignore - docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.large-genesis . - - uses: codecov/codecov-action@v2 - with: - files: ./coverage-output/lcov.info - name: large_genesis - fail_ci_if_error: false - - # Run unit tests with code coverage - unit-tests: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run units tests (with coverage) - env: - DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info - run: | - rm .dockerignore - docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.code-cov . 
- - uses: codecov/codecov-action@v2 - with: - files: ./coverage-output/lcov.info - name: unit_tests - fail_ci_if_error: false - - open-api-validation: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run units tests (with coverage) - env: - DOCKER_BUILDKIT: 1 - run: docker build -o dist/ -f .github/actions/open-api/Dockerfile.open-api-validate . - - name: Upload bundled html - uses: actions/upload-artifact@v2 - with: - name: open-api-bundle - path: | - dist - # Run net-tests - nettest: - # disable this job/test for now, since we haven't seen this pass - # on github actions in a while, and the failures can take > 4 hours - if: ${{ false }} - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run network relay tests - env: - DOCKER_BUILDKIT: 1 - run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.net-tests . - - core-contracts-clarinet-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: "Execute core contract unit tests in Clarinet" - uses: docker://hirosystems/clarinet:1.1.0 - with: - args: test --coverage --manifest-path=./contrib/core-contract-tests/Clarinet.toml - - name: "Export code coverage" - uses: codecov/codecov-action@v1 - with: - files: ./coverage.lcov - verbose: true - fail_ci_if_error: false - - # rustfmt checking + ## rust format: Execute on every run rustfmt: + name: Rust Format runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Run rustfmt check - env: - DOCKER_BUILDKIT: 1 - run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.rustfmt . 
- - # Create distributions - dist: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: rustfmt + - name: Rustfmt + id: rustfmt + uses: actions-rust-lang/rustfmt@v1 + + ## Release tests: Execute on every run + release-tests: + name: Release Tests + uses: stacks-network/stacks-blockchain/.github/workflows/stacks-blockchain-tests.yml@master + + ## Checked for leaked credentials: Execute on every run + leaked-cred-test: + name: Leaked Credential Test runs-on: ubuntu-latest - strategy: - matrix: - platform: - [ - # disable mac builds until osxcross can be updated for bullseye - windows-x64, - # macos-x64, - # macos-arm64, - linux-x64, - linux-musl-x64, - linux-armv7, - linux-arm64, - ] - steps: - - uses: actions/checkout@v2 - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Build distributable - uses: docker/build-push-action@v2 - with: - file: build-scripts/Dockerfile.${{ matrix.platform }} - outputs: dist/${{ matrix.platform }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - - - name: Compress artifact - run: zip --junk-paths ${{ matrix.platform }} ./dist/${{ matrix.platform }}/* - - - name: Upload artifact - uses: actions/upload-artifact@v2 - with: - name: ${{ matrix.platform }} - path: ${{ matrix.platform }}.zip - -# call-docker-platforms-workflow: -# if: ${{ github.event.inputs.tag != '' }} -# uses: 
stacks-network/stacks-blockchain/.github/workflows/docker-platforms.yml@master -# with: -# tag: ${{ github.event.inputs.tag }} -# secrets: -# DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} -# DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} - - # Build docker image, tag it with the git tag and `latest` if running on master branch, and publish under the following conditions - # Will publish if: - # - a tag was passed into this workflow - # - a tag was pushed up - # - this workflow was invoked against a non-master branch (a Docker image tag with the name of the branch will be published) - build-publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - ${{ github.event.inputs.tag }} - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: linux/amd64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - # Only push if (a tag was passed in) or (we're building a non-master branch which isn't a PR) - push: ${{ github.event.inputs.tag != '' || (github.ref != 'refs/heads/master' && !contains(github.ref, 'refs/pull')) }} - - # Build docker image, tag it with the git tag and `latest` if running on master branch, and publish under the following conditions - # Will publish if: - # - a tag was passed into this workflow - 
# - a tag was pushed up - # - this workflow was invoked against a non-master branch (a Docker image tag with the name of the branch will be published) - build-publish-stretch: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - # Stretch tag will be "-stretch" if a tag was passed in, otherwise "-stretch". If the BRANCH is master, will result in "latest-stretch" - # Also determines platforms to be build in docker step - - name: Determine Stretch Tag - run: | - if [[ -z ${TAG} ]]; then - REF=$(echo ${GITHUB_REF#refs/*/} | tr / -) - if [[ "${REF}" == "master" ]]; then - echo "STRETCH_TAG=latest-stretch" >> $GITHUB_ENV - else - echo "STRETCH_TAG=${REF}-stretch" >> $GITHUB_ENV - fi - else - echo "STRETCH_TAG=${TAG}-stretch" >> $GITHUB_ENV - fi - env: - TAG: ${{ github.event.inputs.tag }} - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - ${{ env.STRETCH_TAG }} - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: linux/amd64 - file: Dockerfile.debian - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - # Only push if (a tag was passed in) or (we're building a non-master branch which isn't a PR) - push: ${{ github.event.inputs.tag != '' || (github.ref != 'refs/heads/master' && !contains(github.ref, 'refs/pull')) }} - - # Create a new release if we're building a tag - 
create-release: - runs-on: ubuntu-latest - if: ${{ github.event.inputs.tag != '' }} - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} + - name: Extract branch name + id: extract_branch + if: ${{ github.event_name != 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + - name: Extract branch name + id: extract_branch_pr + if: ${{ github.event_name == 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF})" >> $GITHUB_ENV + - name: Branch name + run: echo running on branch ${{ env.BRANCH_NAME }} + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: TruffleHog Scan + id: trufflehog_check + uses: trufflesecurity/trufflehog@main + with: + path: ./ + base: ${{ env.BRANCH_NAME }} + head: HEAD + + ############################################### + ## Build Tagged Release + ############################################### + ## Build source binaries + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + build-source: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Build Binaries + uses: stacks-network/stacks-blockchain/.github/workflows/build-source-binary.yml@master needs: - - dist - - build-publish - - build-publish-stretch - - steps: - - name: Create Release - id: create_release - uses: actions/create-release@v1 - env: - # Use custom secrets.GH_TOKEN instead of default secrets.GITHUB_TOKEN because the custom token will trigger the - # clarity-js-sdk-pr workflow. As events caused by default tokens do not trigger subsequent workflow runs to avoid loops. 
- GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - with: - tag_name: ${{ github.event.inputs.tag || github.ref }} - release_name: Release ${{ github.event.inputs.tag || github.ref }} - draft: false - prerelease: true - - # Upload distributables to a new release if we're building a tag or a tag was passed in - upload-dist: - runs-on: ubuntu-latest - if: ${{ github.event.inputs.tag != '' }} + - rustfmt + - release-tests + - leaked-cred-test + with: + tag: ${{ inputs.tag }} + parallel_jobs: 4 + arch: >- + ["linux-glibc-x64", "linux-musl-x64", "linux-glibc-arm64", "linux-musl-arm64", "macos-x64", "macos-arm64", "windows-x64"] + + ## Create github release with binary archives + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + github-release: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Github Release + uses: stacks-network/stacks-blockchain/.github/workflows/github-release.yml@master + needs: build-source + with: + tag: ${{ inputs.tag }} + arch: >- + ["linux-glibc-x64", "linux-musl-x64", "linux-glibc-arm64", "linux-musl-arm64", "macos-x64", "macos-arm64", "windows-x64"] + secrets: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + + ## Create docker alpine images + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + docker-alpine: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Docker Alpine (Binary) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-alpine-binary.yml@master + needs: github-release + with: + tag: ${{ inputs.tag }} + docker_platforms: linux/arm64, linux/amd64, linux/amd64/v2, linux/amd64/v3 + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + + ## 
Create docker debian images + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + docker-debian: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Docker Debian (Binary) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-debian-binary.yml@master + needs: github-release + with: + tag: ${{ inputs.tag }} + docker_platforms: linux/amd64, linux/amd64/v2, linux/amd64/v3 + linux_version: debian + build_type: binary + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + + ############################################### + ## Build Branch/PR + ############################################### + ## Create docker debian images + ## Only run if: + ## - Tag is *not* provided + build-branch: + if: ${{ inputs.tag == '' }} + name: Docker Debian (Source) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-debian-source.yml@master needs: - - create-release - strategy: - matrix: - platform: - [ - # disable mac builds until osxcross can be updated for bullseye - windows-x64, -# macos-x64, -# macos-arm64, - linux-x64, - linux-musl-x64, - linux-armv7, - linux-arm64, - ] - - steps: - - uses: actions/checkout@v2 - - name: Download distro - uses: actions/download-artifact@v2 - with: - name: ${{ matrix.platform }} - path: dist/ - - - name: Upload Release Asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create-release.outputs.upload_url }} - asset_path: ./dist/${{ matrix.platform }}.zip - asset_name: ${{ matrix.platform }}.zip - asset_content_type: application/zip + - rustfmt + - leaked-cred-test + with: + docker_platforms: linux/amd64 + linux_version: debian + build_type: source + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} 
+ DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml index fd28738cf1..9ac0956a85 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -5,7 +5,7 @@ name: Open Clarity JS SDK PR env: - CLARITY_JS_SDK_REPOSITORY: blockstack/clarity-js-sdk + CLARITY_JS_SDK_REPOSITORY: stacks-network/clarity-js-sdk COMMIT_USER: Hiro DevOps COMMIT_EMAIL: 45208873+blockstack-devops@users.noreply.github.com on: @@ -16,28 +16,30 @@ on: jobs: run: + name: Open Clarity JS SDK PR runs-on: ubuntu-latest # This condition can be removed once the main `stacks-blockchain` workflow creates pre-releases # when appropriate, instead of full releases for every tag passed in. if: "!contains(github.ref, '-rc')" steps: - name: Checkout latest clarity js sdk - uses: actions/checkout@v2 + id: git_checkout + uses: actions/checkout@v3 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} ref: master - - name: Determine Release Version + id: get_release_version run: | RELEASE_VERSION=$(echo ${GITHUB_REF#refs/*/} | tr / -) echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV - - name: Update SDK Tag + id: update_sdk_tag run: sed -i "s@CORE_SDK_TAG = \".*\"@CORE_SDK_TAG = \"$RELEASE_VERSION\"@g" packages/clarity-native-bin/src/index.ts - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + id: create_pr + uses: peter-evans/create-pull-request@v4 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" @@ -49,7 +51,7 @@ jobs: labels: | dependencies body: | - :robot: This is an automated pull request created from a new release in [stacks-blockchain](https://github.com/blockstack/stacks-blockchain/releases). + :robot: This is an automated pull request created from a new release in [stacks-blockchain](https://github.com/stacks-network/stacks-blockchain/releases). 
Updates the clarity-native-bin tag. assignees: zone117x diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..1e6872bd69 --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,44 @@ +# Disabled - this workflow needs more work so it's not incredibly chatty +## +## Perform Clippy checks - currently set to defaults +## https://github.com/rust-lang/rust-clippy#usage +## https://rust-lang.github.io/rust-clippy/master/index.html +## + +name: Clippy Checks + +# Only run when: +# - PRs are (re)opened against develop branch + +on: + pull_request: + branches: + - develop + types: + - opened + - reopened + +jobs: + clippy_check: + if: ${{ false }} + name: Clippy Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: clippy + - name: Clippy + id: clippy + uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: --all-features diff --git a/.github/workflows/docker-platforms.yml b/.github/workflows/docker-platforms.yml deleted file mode 100644 index d8d7efa4c3..0000000000 --- a/.github/workflows/docker-platforms.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: Build/Release Additional Docker Platform Images - -# Only run when: -# - the workflow is automatically triggered during a release with the relevant tag -# - the workflow is started from the UI with a tag -on: - workflow_call: - inputs: - tag: - required: true - type: string - secrets: - DOCKERHUB_USERNAME: - required: true - DOCKERHUB_PASSWORD: - required: true - -env: - BUILD_PLATFORMS: linux/arm64 - -jobs: - # Build docker image, tag it with the branch and docker image tag passed 
in, and publish - build-publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - ${{ inputs.tag }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: ${{ env.BUILD_PLATFORMS }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: true - - # Build docker image, tag it with the branch and docker image tag passed in, and publish - build-publish-stretch: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Determine Stretch Tag and build platforms - run: | - if [[ -z ${TAG} ]]; then - REF=$(echo ${GITHUB_REF#refs/*/} | tr / -) - if [[ "${REF}" == "master" ]]; then - echo "STRETCH_TAG=latest-stretch" >> $GITHUB_ENV - else - echo "STRETCH_TAG=${REF}-stretch" >> $GITHUB_ENV - fi - else - echo "STRETCH_TAG=${TAG}-stretch" >> $GITHUB_ENV - fi - env: - TAG: ${{ inputs.tag }} - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ 
github.event.repository.name }} - tags: | - type=ref,event=branch - ${{ env.STRETCH_TAG }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: ${{ env.BUILD_PLATFORMS }} - file: Dockerfile.debian - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: true diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml index ad293191b9..b2a44f7296 100644 --- a/.github/workflows/docs-pr.yml +++ b/.github/workflows/docs-pr.yml @@ -1,5 +1,5 @@ ## -## Github workflow for auto-opening a PR on the docs.blockstack repo +## Github workflow for auto-opening a PR on the stacks-network/docs repo ## whenever the auto-generated documentation here changes. ## ## It does this using a robot account `kantai-robot` to create a @@ -12,37 +12,44 @@ name: Open Docs PR env: ROBOT_OWNER: kantai-robot ROBOT_REPO: docs.blockstack - TARGET_OWNER: blockstack - TARGET_REPO: docs.blockstack - TARGET_REPOSITORY: blockstack/docs.blockstack + TARGET_OWNER: stacks-network + TARGET_REPO: docs + TARGET_REPOSITORY: stacks-network/docs + +# Only run when: +# - push to master + on: push: - branches: [master] + branches: + - master jobs: dist: + name: Open Docs PR runs-on: ubuntu-latest env: ROBOT_BRANCH: ${{ format('auto/clarity-ref-{0}', github.sha) }} steps: - - uses: actions/checkout@v2 - + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Build docs + id: build_docs env: DOCKER_BUILDKIT: 1 run: rm -rf docs-output && docker build -o docs-output -f ./.github/actions/docsgen/Dockerfile.docsgen . 
- - name: Checkout latest docs - uses: actions/checkout@v2 + id: git_checkout_docs + uses: actions/checkout@v3 with: token: ${{ secrets.DOCS_GITHUB_TOKEN }} repository: ${{ env.TARGET_REPOSITORY }} - path: docs.blockstack - + path: docs - name: Branch and commit id: push run: | - cd docs.blockstack + cd docs git config user.email "kantai+robot@gmail.com" git config user.name "PR Robot" git fetch --unshallow @@ -61,8 +68,9 @@ jobs: echo "::set-output name=open_pr::1" fi - name: Open PR + id: open_pr if: ${{ steps.push.outputs.open_pr == '1' }} - uses: actions/github-script@v2 + uses: actions/github-script@v6 with: github-token: ${{ secrets.DOCS_GITHUB_TOKEN }} script: | diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml new file mode 100644 index 0000000000..c0683f51df --- /dev/null +++ b/.github/workflows/github-release.yml @@ -0,0 +1,58 @@ +## +## Create the github release and store artifact files (with checksum) +## + +name: Github Release + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + arch: + description: "Stringified JSON object listing of platform matrix" + required: true + type: string + secrets: + GH_TOKEN: + required: true + +jobs: + create-release: + if: ${{ inputs.tag != '' }} + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Download Artifacts + id: download_artifacts + uses: actions/download-artifact@v3 + with: + name: artifact + path: release + # Generate a checksums file to be added to the release page + - name: Generate Checksums + id: generate_checksum + uses: jmgilman/actions-generate-checksum@v1 + with: + output: CHECKSUMS.txt + patterns: | + release/*.zip + # Upload the release archives with the checksums file + - name: Upload Release + id: upload_release + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + with: + name: Release ${{ github.event.inputs.tag || 
github.ref }} + tag_name: ${{ github.event.inputs.tag || github.ref }} + draft: false + prerelease: true + fail_on_unmatched_files: true + files: | + release/*.zip + CHECKSUMS.txt diff --git a/.github/workflows/image-build-alpine-binary.yml b/.github/workflows/image-build-alpine-binary.yml new file mode 100644 index 0000000000..f18bac51ad --- /dev/null +++ b/.github/workflows/image-build-alpine-binary.yml @@ -0,0 +1,81 @@ +## +## Build the Docker Alpine image from the pre-built downloaded binary asset +## + +name: Build Alpine Binary Image + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + description: "semver tag for alpine images" + docker_platforms: + required: true + description: "Arch to buid alpine images" + type: string + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + # Only run if a tag is provided manually + if: ${{ inputs.tag != '' }} + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id: docker_qemu + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + # tag image with: + # latest: `latest` + # input tag: `` + # git tag: `1234` + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + ${{ github.repository }} + tags: | + type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} + type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' }} + type=ref,event=tag,enable=true + - name: Login to DockerHub + id: docker_login + uses: 
docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.alpine-binary + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + TAG=${{ inputs.tag}} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/image-build-debian-binary.yml b/.github/workflows/image-build-debian-binary.yml new file mode 100644 index 0000000000..38947f1c53 --- /dev/null +++ b/.github/workflows/image-build-debian-binary.yml @@ -0,0 +1,92 @@ +## +## Build the Docker Debian image from the pre-built downloaded binary asset +## + +name: Build Linux Binary Image + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + description: "semver tag for linux images" + docker_platforms: + required: true + description: "Arch to buid linux images" + type: string + linux_version: + required: true + description: "Linux image to build" + type: string + default: debian + build_type: + required: true + description: Build type (source/binary) + type: string + default: binary + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + # Only run if a tag is provided manually + if: ${{ inputs.tag != '' }} + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id: docker_qemu + 
uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + - name: Extract branch name + id: extract_branch + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + # tag image with: + # branch name: `latest-` + # input tag: `-` + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + ${{ github.repository }} + tags: | + type=raw,value=latest-${{ inputs.linux_version }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} + type=raw,value=${{ inputs.tag }}-${{ inputs.linux_version }},enable=${{ inputs.tag != '' }} + - name: Login to DockerHub + id: docker_login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ inputs.linux_version }}-${{ inputs.build_type }} + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + TAG=${{ inputs.tag}} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/image-build-debian-source.yml b/.github/workflows/image-build-debian-source.yml new file mode 100644 index 0000000000..de3615a3e0 --- /dev/null +++ b/.github/workflows/image-build-debian-source.yml @@ -0,0 +1,90 @@ +## +## Build the Docker Debian image from source +## + +name: Build Linux Source Image + +# Only run when: +# - workflow is manually triggered +# - manually triggered via the ci.yml workflow + +on: + workflow_dispatch: + workflow_call: + inputs: + docker_platforms: + required: true + description: "Arch to buid images" + 
type: string + default: linux/amd64 + linux_version: + required: true + description: "Linux image to build" + type: string + default: debian + build_type: + required: true + description: Build type (source/binary) + type: string + default: source + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id: docker_qemu + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + - name: Extract branch name + id: extract_branch + if: ${{ github.event_name != 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + - name: Extract branch name (PR) + id: extract_branch_pr + if: ${{ github.event_name == 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF})" >> $GITHUB_ENV + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + ${{ github.repository }} + tags: | + type=raw,value=${{ env.BRANCH_NAME }} + type=ref,event=pr + - name: Login to DockerHub + id: docker_login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ inputs.linux_version }}-${{ inputs.build_type }} + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + STACKS_NODE_VERSION=${{ env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + 
GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/stacks-blockchain-tests.yml b/.github/workflows/stacks-blockchain-tests.yml new file mode 100644 index 0000000000..fb1dffc1ae --- /dev/null +++ b/.github/workflows/stacks-blockchain-tests.yml @@ -0,0 +1,117 @@ +## +## Run tests for tagged releases +## + +name: Tests + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + +jobs: + # Run full genesis test + full-genesis: + name: Full Genesis Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Single full genesis integration test + id: full_genesis_test + env: + DOCKER_BUILDKIT: 1 + # Remove .dockerignore file so codecov has access to git info + run: | + rm .dockerignore + docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.large-genesis . + - name: Large Genesis Codecov + id: full_genesis_codecov + uses: codecov/codecov-action@v3 + with: + files: ./coverage-output/lcov.info + name: large_genesis + fail_ci_if_error: false + + # Run unit tests with code coverage + unit-tests: + name: Unit Tests + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run unit tests (with coverage) + id: unit_tests_codecov + env: + DOCKER_BUILDKIT: 1 + # Remove .dockerignore file so codecov has access to git info + run: | + rm .dockerignore + docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.code-cov . 
+ - name: Run unit tests + id: codedov + uses: codecov/codecov-action@v3 + with: + files: ./coverage-output/lcov.info + name: unit_tests + fail_ci_if_error: false + + open-api-validation: + name: OpenAPI Validation + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run units tests (with coverage) + id: api_codecov + env: + DOCKER_BUILDKIT: 1 + run: docker build -o dist/ -f .github/actions/open-api/Dockerfile.open-api-validate . + - name: Upload bundled html + id: upload_html_artifact + uses: actions/upload-artifact@v3 + with: + name: open-api-bundle + path: | + dist + + # Run net-tests + nettest: + # disable this job/test for now, since we haven't seen this pass + # on github actions in a while, and the failures can take > 4 hours + if: ${{ false }} + name: Net-Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run network relay tests + id: nettest + env: + DOCKER_BUILDKIT: 1 + run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.net-tests . 
+ + # Core contract tests + core-contracts-clarinet-test: + name: Core Contracts Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Execute core contract unit tests in Clarinet + id: clarinet_unit_test + uses: docker://hirosystems/clarinet:1.1.0 + with: + args: test --coverage --manifest-path=./contrib/core-contract-tests/Clarinet.toml + - name: Export code coverage + id: clarinet_codecov + uses: codecov/codecov-action@v3 + with: + files: ./coverage.lcov + verbose: true diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 deleted file mode 100644 index 072fee58b7..0000000000 --- a/build-scripts/Dockerfile.linux-arm64 +++ /dev/null @@ -1,23 +0,0 @@ -FROM rust:bullseye as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . - -RUN rustup target add aarch64-unknown-linux-gnu - -RUN apt-get update && apt-get install -y git gcc-aarch64-linux-gnu - -RUN CC=aarch64-linux-gnu-gcc \ - CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ - cargo build --release --workspace --target aarch64-unknown-linux-gnu - -RUN mkdir /out && cp -R /src/target/aarch64-unknown-linux-gnu/release/. /out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 deleted file mode 100644 index d871c03dc0..0000000000 --- a/build-scripts/Dockerfile.linux-armv7 +++ /dev/null @@ -1,23 +0,0 @@ -FROM rust:bullseye as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . 
- -RUN rustup target add armv7-unknown-linux-gnueabihf - -RUN apt-get update && apt-get install -y git gcc-arm-linux-gnueabihf - -RUN CC=arm-linux-gnueabihf-gcc \ - CC_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - cargo build --release --workspace --target armv7-unknown-linux-gnueabihf - -RUN mkdir /out && cp -R /src/target/armv7-unknown-linux-gnueabihf/release/. /out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-glibc-arm64 b/build-scripts/Dockerfile.linux-glibc-arm64 new file mode 100644 index 0000000000..7ce50b6a68 --- /dev/null +++ b/build-scripts/Dockerfile.linux-glibc-arm64 @@ -0,0 +1,26 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=aarch64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git gcc-aarch64-linux-gnu + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && CC=aarch64-linux-gnu-gcc \ + CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-glibc-x64 b/build-scripts/Dockerfile.linux-glibc-x64 new file mode 100644 index 0000000000..2db13cb51e --- /dev/null +++ b/build-scripts/Dockerfile.linux-glibc-x64 @@ -0,0 +1,23 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-musl-arm64 b/build-scripts/Dockerfile.linux-musl-arm64 new file mode 100644 index 0000000000..135e6f9fc9 --- /dev/null +++ b/build-scripts/Dockerfile.linux-musl-arm64 @@ -0,0 +1,21 @@ +FROM messense/rust-musl-cross:aarch64-musl as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=aarch64-unknown-linux-musl +WORKDIR /src + +COPY . . + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index c0a31b190d..73e64b4d67 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -1,23 +1,23 @@ -FROM rust:bullseye as build +FROM rust:alpine as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-musl WORKDIR /src COPY . . -RUN rustup target add x86_64-unknown-linux-musl - -RUN apt-get update && apt-get install -y git musl-tools - -RUN CC=musl-gcc \ - CC_x86_64_unknown_linux_musl=musl-gcc \ - CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=musl-gcc \ - cargo build --release --workspace --target x86_64-unknown-linux-musl +RUN apk update && apk add git musl-dev -RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-musl/release/. /out +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 deleted file mode 100644 index b451b6f427..0000000000 --- a/build-scripts/Dockerfile.linux-x64 +++ /dev/null @@ -1,20 +0,0 @@ -FROM rust:bullseye as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . - -RUN apt-get update && apt-get install -y git - -RUN rustup target add x86_64-unknown-linux-gnu - -RUN cargo build --release --workspace --target x86_64-unknown-linux-gnu - -RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-gnu/release/. /out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index f56dd2e25a..d6b80f267a 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -1,24 +1,29 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" +ARG TARGET=aarch64-apple-darwin WORKDIR /src COPY . . -RUN rustup target add aarch64-apple-darwin - RUN apt-get update && apt-get install -y clang zstd -RUN wget -nc -O /tmp/osxcross.tar.zst "https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" -RUN mkdir /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross - -RUN . 
/opt/osxcross/env-macos-aarch64 && \ - cargo build --target aarch64-apple-darwin --release --workspace - -RUN mkdir /out && cp -R /src/target/aarch64-apple-darwin/release/. /out +# Retrieve and install osxcross +RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \ + && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && . /opt/osxcross/env-macos-aarch64 \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index c4fe7a5a58..5403b2fe87 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -1,24 +1,29 @@ -FROM rust:buster as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" +ARG TARGET=x86_64-apple-darwin WORKDIR /src COPY . . -RUN rustup target add x86_64-apple-darwin - RUN apt-get update && apt-get install -y clang zstd -RUN wget -nc -O /tmp/osxcross.tar.zst "https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" -RUN mkdir /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross - -RUN . 
/opt/osxcross/env-macos-x86_64 && \ - cargo build --target x86_64-apple-darwin --release --workspace +# Retrieve and install osxcross +RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \ + && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross -RUN mkdir /out && cp -R /src/target/x86_64-apple-darwin/release/. /out +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && . /opt/osxcross/env-macos-x86_64 \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 923217b122..c3ffcd5d29 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -3,20 +3,23 @@ FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG TARGET=x86_64-pc-windows-gnu WORKDIR /src COPY . . -RUN rustup target add x86_64-pc-windows-gnu - RUN apt-get update && apt-get install -y git gcc-mingw-w64-x86-64 -RUN CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. 
${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc \ - cargo build --release --workspace --target x86_64-pc-windows-gnu - -RUN mkdir /out && cp -R /src/target/x86_64-pc-windows-gnu/release/. /out + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / \ No newline at end of file diff --git a/build-scripts/build-dist.sh b/build-scripts/build-dist.sh index 760d0ea613..ac2c8bcd5c 100755 --- a/build-scripts/build-dist.sh +++ b/build-scripts/build-dist.sh @@ -15,13 +15,13 @@ build_platform () { case $DIST_TARGET_FILTER in (*[![:blank:]]*) case $DIST_TARGET_FILTER in - linux-x64) build_platform linux-x64 ;; - linux-musl-x64) build_platform linux-musl-x64 ;; - linux-armv7) build_platform linux-armv7 ;; - linux-arm64) build_platform linux-arm64 ;; - windows-x64) build_platform windows-x64 ;; - macos-x64) build_platform macos-x64 ;; - macos-arm64) build_platform macos-arm64 ;; + linux-glibc-x64) build_platform linux-glibc-x64 ;; + linux-glibc-arm64) build_platform linux-glibc-arm64 ;; + linux-musl-x64) build_platform linux-musl-x64 ;; + linux-musl-arm64) build_platform linux-musl-arm64 ;; + windows-x64) build_platform windows-x64 ;; + macos-x64) build_platform macos-x64 ;; + macos-arm64) build_platform macos-arm64 ;; *) echo "Invalid dist target filter '$DIST_TARGET_FILTER'" exit 1 @@ -30,10 +30,10 @@ case $DIST_TARGET_FILTER in ;; (*) echo "Building distrubtions for all targets." 
- build_platform linux-x64 + build_platform linux-glibc-x64 + build_platform linux-glibc-arm64 build_platform linux-musl-x64 - build_platform linux-armv7 - build_platform linux-arm64 + build_platform linux-musl-arm64 build_platform windows-x64 build_platform macos-x64 build_platform macos-arm64 diff --git a/docs/ci-release.md b/docs/ci-release.md new file mode 100644 index 0000000000..7025226d1e --- /dev/null +++ b/docs/ci-release.md @@ -0,0 +1,150 @@ +# Releases + +All releases are built via a Github Actions workflow named `CI`, and is responsible for building binary archives, checksums, and resulting docker images. +This workflow will also trigger any tests that need to be run, like integration tests. + +1. Releases are only created if a tag is manually provided when the ci workflow is triggered. +2. Pushing a new feature branch: Nothing is triggered automatically. PR's are required, or the ci workflow can be triggered manually on a specific branch to build a docker image for the specified branch. + +The following workflow steps are currently disabled: + +- Clippy +- Net-test +- Crate audit + +## TL;DR + +1. A PR will produce a single image built from source on Debian with glibc with 2 tags: + - `stacks-blockchain:` + - `stacks-blockchain:` +2. A merged PR from `develop` to the default branch will produce a single image built from source on Debian with glibc: + - `stacks-blockchain:` +3. An untagged build of any branch will produce a single image built from source on Debian with glibc: + - `stacks-blockchain:` +4. A tagged release on a non-default branch will produce 2 versions of the docker image (along with all binary archives): + - An Alpine image for several architectures tagged with: + - `stacks-blockchain:` + - An Debian image for several architectures tagged with: + - `stacks-blockchain:` +5. 
A tagged release on the default branch will produce 2 versions of the docker image (along with all binary archives): + - An Alpine image for several architectures tagged with: + - `stacks-blockchain:` + - `stacks-blockchain:` + - An Debian image for several architectures tagged with: + - `stacks-blockchain:` + - `stacks-blockchain:` + +## Release workflow: + +1. Create a feature branch: `feat/112-fix-something` +2. PR `feat/112-fix-something` to the `develop` branch + 1. CI Workflow is automatically triggered, resulting in a pushed docker image tagged with the **branch name** and **PR number** +3. PR `develop` to the default branch + 1. CI Workflow is automatically triggered, resulting in a pushed docker image tagged with the **branch name** and **PR number** +4. Merge `develop` branch to the default branch + 1. CI Workflow is triggered, resulting in a pushed docker image tagged with the **default branch name** +5. CI workflow is manually triggered on **non-default branch** with a version, i.e. `2.1.0.0.0-rc0` + 1. Github release for the manually input version is created with binaries + 2. Docker image pushed with tags of the **input version** and **branch** +6. CI workflow is manually triggered on **default branch** with a version, i.e. `2.1.0.0.0` + 1. Github release for the manually input version is created with binaries + 2. 
Docker image pushed with tags of the **input version** and **latest** + +## PR a branch to develop: + +ex: Branch is named `feat/112-fix-something` and the PR is numbered `112` + +- Steps executed: + - Rust Format + - Integration Tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name and PR number as tags + - ex: + - `stacks-blockchain:feat-112-fix-something` + - `stacks-blockchain:pr-112` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Merging a branch to develop: + +Nothing is triggered automatically + +## PR develop to master branches: + +ex: Branch is named `develop` and the PR is numbered `113` + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name and PR number as tags + - ex: + - `stacks-blockchain:develop` + - `stacks-blockchain:pr-113` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Merging a PR from develop to master: + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name as a tag + - ex: + - `stacks-blockchain:master` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Manually triggering workflow without tag (any branch): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name as a tag + - ex: + - `stacks-blockchain:` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Manually triggering workflow with tag 
on a non-default branch (i.e. tag of `2.1.0.0.0-rc0`): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Binaries built for specified architectures + - Archive and checksum files added to github release + - Github release (with artifacts/checksum) is created using the manually input tag + - Docker image built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` + - ex: + - `stacks-blockchain:2.1.0.0.0-rc0` +- Steps _not_ executed: + - No docker images built from source + +## Manually triggering workflow with tag on default branch (i.e. tag of `2.1.0.0.0`): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Binaries built for specified architectures + - Archive and checksum files added to github release + - Github release (with artifacts/checksum) is created using the manually input tag + - Docker image built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` + - ex: + - `stacks-blockchain:2.1.0.0.0-debian` + - `stacks-blockchain:latest-debian` + - `stacks-blockchain:2.1.0.0.0` + - `stacks-blockchain:latest` +- Steps _not_ executed: + - No docker images built from source From 1da60adaab5d17b17348b6d38038f598de18318d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 7 Jun 2023 10:32:30 -0700 Subject: [PATCH 155/158] hardcode the dockerhub org (#3741) * hardcode the dockerhub org * rename base image to stacks-blockchain from stacks-node --- .github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests | 2 +- .github/workflows/image-build-alpine-binary.yml | 2 +- .github/workflows/image-build-debian-binary.yml | 2 +- .github/workflows/image-build-debian-source.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests index 
7eec85b5e6..231e7ddb13 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests +++ b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests @@ -1,4 +1,4 @@ -FROM stacks-node:integrations AS test +FROM stacks-blockchain:integrations AS test ARG test_name ENV BITCOIND_TEST 1 diff --git a/.github/workflows/image-build-alpine-binary.yml b/.github/workflows/image-build-alpine-binary.yml index f18bac51ad..f5dc992380 100644 --- a/.github/workflows/image-build-alpine-binary.yml +++ b/.github/workflows/image-build-alpine-binary.yml @@ -54,7 +54,7 @@ jobs: uses: docker/metadata-action@v4 with: images: | - ${{ github.repository }} + blockstack/${{ github.event.repository.name }} tags: | type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' }} diff --git a/.github/workflows/image-build-debian-binary.yml b/.github/workflows/image-build-debian-binary.yml index 38947f1c53..e1584abbc1 100644 --- a/.github/workflows/image-build-debian-binary.yml +++ b/.github/workflows/image-build-debian-binary.yml @@ -66,7 +66,7 @@ jobs: uses: docker/metadata-action@v4 with: images: | - ${{ github.repository }} + blockstack/${{ github.event.repository.name }} tags: | type=raw,value=latest-${{ inputs.linux_version }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} type=raw,value=${{ inputs.tag }}-${{ inputs.linux_version }},enable=${{ inputs.tag != '' }} diff --git a/.github/workflows/image-build-debian-source.yml b/.github/workflows/image-build-debian-source.yml index de3615a3e0..d60166e26c 100644 --- a/.github/workflows/image-build-debian-source.yml +++ b/.github/workflows/image-build-debian-source.yml @@ -65,7 +65,7 @@ jobs: uses: docker/metadata-action@v4 with: images: | - ${{ github.repository }} + blockstack/${{ github.event.repository.name }} tags: | 
type=raw,value=${{ env.BRANCH_NAME }} type=ref,event=pr From a04f36465f491f6394c4cdc993cd8b3c1922b7bf Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 7 Jun 2023 14:16:00 -0700 Subject: [PATCH 156/158] update docs-pr dockerfile to use bullseye --- .github/actions/docsgen/Dockerfile.docsgen | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/docsgen/Dockerfile.docsgen b/.github/actions/docsgen/Dockerfile.docsgen index 61c95fb70a..925587c71c 100644 --- a/.github/actions/docsgen/Dockerfile.docsgen +++ b/.github/actions/docsgen/Dockerfile.docsgen @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:bullseye as build WORKDIR /src From 7c10037a7d539fb9f1cc4f9da990cd74fc115d3b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 12 Jun 2023 09:54:16 -0500 Subject: [PATCH 157/158] chore: eliminate unused imports, replace unused version tests with epochs tests --- clarity/src/vm/tests/assets.rs | 9 +-- clarity/src/vm/tests/contracts.rs | 92 +++++++---------------- clarity/src/vm/tests/datamaps.rs | 7 +- clarity/src/vm/tests/mod.rs | 29 +------ clarity/src/vm/tests/principals.rs | 18 +---- clarity/src/vm/tests/sequences.rs | 6 +- clarity/src/vm/tests/simple_apply_eval.rs | 5 +- clarity/src/vm/tests/traits.rs | 19 +---- 8 files changed, 44 insertions(+), 141 deletions(-) diff --git a/clarity/src/vm/tests/assets.rs b/clarity/src/vm/tests/assets.rs index dd1c1bb890..064daf50d3 100644 --- a/clarity/src/vm/tests/assets.rs +++ b/clarity/src/vm/tests/assets.rs @@ -15,24 +15,19 @@ // along with this program. If not, see . 
use crate::vm::ast::ASTRules; -use crate::vm::contexts::{AssetMap, AssetMapEntry, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; +use crate::vm::contexts::{AssetMap, AssetMapEntry, OwnedEnvironment}; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::events::StacksTransactionEvent; -use crate::vm::execute as vm_execute; use crate::vm::representations::SymbolicExpression; use crate::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use crate::vm::tests::{ test_clarity_versions, test_epochs, tl_env_factory as env_factory, TopLevelMemoryEnvironmentGenerator, }; -use crate::vm::types::{ - AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, -}; +use crate::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier, Value}; use crate::vm::version::ClarityVersion; use crate::vm::ContractContext; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::hex_bytes; const FIRST_CLASS_TOKENS: &str = "(define-fungible-token stackaroos) (define-read-only (my-ft-get-balance (account principal)) diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index b0b2db0c2c..b313285064 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -14,27 +14,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::types::chainstate::BlockHeaderHash; -use crate::types::chainstate::StacksBlockId; - -#[cfg(any(test, feature = "testing"))] -use rstest::rstest; -use stacks_common::types::StacksEpochId; - -use crate::vm::ast; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::ASTRules; -use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; -use crate::vm::costs::ExecutionCost; -use crate::vm::database::ClarityDatabase; +use crate::vm::contexts::Environment; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute as vm_execute; -use crate::vm::representations::SymbolicExpression; use crate::vm::tests::{ env_factory, execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, - tl_env_factory, BurnStateDB, MemoryEnvironmentGenerator, TopLevelMemoryEnvironmentGenerator, - TEST_BURN_STATE_DB, TEST_HEADER_DB, + tl_env_factory, MemoryEnvironmentGenerator, TopLevelMemoryEnvironmentGenerator, }; use crate::vm::tests::{test_clarity_versions, test_epochs}; use crate::vm::types::{ @@ -42,15 +29,12 @@ use crate::vm::types::{ TypeSignature, Value, }; use crate::vm::ClarityVersion; -use stacks_common::types::chainstate::{ConsensusHash, SortitionId}; -use stacks_common::util::hash::hex_bytes; +#[cfg(any(test, feature = "testing"))] +use rstest::rstest; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::StacksEpochId; -use crate::vm::types::serialization::TypePrefix::Buffer; -use crate::vm::types::BuffData; use crate::vm::ContractContext; -use crate::vm::Value::Sequence; - -use crate::vm::database::MemoryBackingStore; const FACTORIAL_CONTRACT: &str = "(define-map factorials { id: int } { current: int, index: int }) (define-private (init-factorial (id int) (factorial int)) @@ -110,8 +94,11 @@ fn get_principal_as_principal_data() -> PrincipalData { StandardPrincipalData::transient().into() } -#[test] -fn test_get_block_info_eval() { 
+#[apply(test_epochs)] +fn test_get_block_info_eval( + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { let contracts = [ "(define-private (test-func) (get-block-info? time u1))", "(define-private (test-func) (get-block-info? time block-height))", @@ -139,10 +126,10 @@ fn test_get_block_info_eval() { ClarityVersion::Clarity2, ); + let mut owned_env = tl_env_factory.get_env(epoch); for i in 0..contracts.len() { - let mut marf = MemoryBackingStore::new(); - let mut owned_env = OwnedEnvironment::new(marf.as_clarity_db(), StacksEpochId::latest()); - let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + let contract_identifier = + QualifiedContractIdentifier::local(&format!("test-contract-{}", i)).unwrap(); owned_env .initialize_contract( contract_identifier.clone(), @@ -175,12 +162,8 @@ fn test_get_block_info_eval() { } } -#[apply(test_clarity_versions)] -fn test_contract_caller( - version: ClarityVersion, - epoch: StacksEpochId, - mut env_factory: MemoryEnvironmentGenerator, -) { +#[apply(test_epochs)] +fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { let mut owned_env = env_factory.get_env(epoch); let contract_a = "(define-read-only (get-caller) (list contract-caller tx-sender))"; @@ -314,12 +297,8 @@ fn tx_sponsor_contract_asserts(env: &mut Environment, sponsor: Option. 
-use crate::vm::contexts::OwnedEnvironment; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType, ShortReturnType}; +use crate::vm::errors::{CheckErrors, Error, ShortReturnType}; use crate::vm::execute; use crate::vm::types::{ - ListData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, - TupleTypeSignature, TypeSignature, Value, + ListData, SequenceData, TupleData, TupleTypeSignature, TypeSignature, Value, }; use crate::vm::ClarityName; use std::convert::From; diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 66817a6cd7..518b063e41 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -13,37 +13,16 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . - -use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; -use crate::vm::database::ClarityDatabase; +use crate::vm::contexts::OwnedEnvironment; use crate::vm::database::MemoryBackingStore; use crate::vm::errors::Error; -use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{PrincipalData, ResponseData, Value}; -use crate::vm::StacksEpoch; -use stacks_common::util::hash::hex_bytes; - -use stacks_common::consts::{ - BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, - BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, -}; -use stacks_common::types::{StacksEpochId, PEER_VERSION_EPOCH_2_0}; - -use crate::vm::{ - analysis::AnalysisDatabase, - clarity::{ClarityConnection, TransactionConnection}, - contexts::AssetMap, - costs::{ExecutionCost, LimitedCostTracker}, -}; +use crate::vm::types::Value; + +use stacks_common::types::StacksEpochId; pub use 
crate::vm::database::BurnStateDB; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use super::events::StacksTransactionEvent; pub use super::test_util::*; use super::ClarityVersion; diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index c162d8153e..684347406c 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -1,29 +1,15 @@ use crate::vm::ast::ASTRules; use crate::vm::execute_with_parameters; -use crate::vm::types::BufferLength; -use crate::vm::types::SequenceSubtype::{BufferType, StringType}; -use crate::vm::types::StringSubtype::ASCII; -use crate::vm::types::TypeSignature::{PrincipalType, SequenceType}; +use crate::vm::types::TypeSignature::PrincipalType; use crate::vm::types::{ASCIIData, BuffData, CharType, SequenceData, Value}; use crate::vm::ClarityVersion; -use crate::vm::callables::{DefineType, DefinedFunction}; -use crate::vm::costs::LimitedCostTracker; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::{ - CheckErrors, Error, InterpreterError, InterpreterResult as Result, RuntimeErrorType, -}; -use crate::vm::eval; -use crate::vm::execute; +use crate::vm::errors::CheckErrors; use crate::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TupleData, TypeSignature, BUFF_1, BUFF_20, }; -use crate::vm::{ - CallStack, ContractContext, Environment, GlobalContext, LocalContext, SymbolicExpression, -}; use stacks_common::types::StacksEpochId; -use std::collections::HashMap; use crate::vm::functions::principals::PrincipalConstructErrorCode; diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index dcdf841b65..30af202163 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -14,19 +14,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::vm::types::signatures::{ListTypeData, SequenceSubtype}; +use crate::vm::types::signatures::SequenceSubtype; use crate::vm::types::TypeSignature::{BoolType, IntType, SequenceType, UIntType}; use crate::vm::types::{StringSubtype, StringUTF8Length, TypeSignature, Value}; use rstest::rstest; use rstest_reuse::{self, *}; -use crate::vm::analysis::errors::CheckError; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::tests::test_clarity_versions; -use crate::vm::types::signatures::SequenceSubtype::{BufferType, ListType, StringType}; +use crate::vm::types::signatures::SequenceSubtype::{BufferType, StringType}; use crate::vm::types::signatures::StringSubtype::ASCII; use crate::vm::types::BufferLength; -use crate::vm::types::CharType::UTF8; use crate::vm::{execute, execute_v2, ClarityVersion}; use stacks_common::types::StacksEpochId; use std::convert::{TryFrom, TryInto}; diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 83f4c53271..df2607b8d9 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -14,8 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; - use rstest::rstest; use rstest_reuse::{self, *}; @@ -32,13 +30,12 @@ use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::*; use crate::vm::types::StacksAddressExtensions; use crate::vm::types::{ASCIIData, BuffData, CharType, QualifiedContractIdentifier, TypeSignature}; -use crate::vm::types::{PrincipalData, ResponseData, SequenceData, SequenceSubtype, StringSubtype}; +use crate::vm::types::{PrincipalData, SequenceData}; use crate::vm::ClarityVersion; use crate::vm::{ eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_parameters, }; use crate::vm::{CallStack, ContractContext, Environment, GlobalContext, LocalContext, Value}; -use stacks_common::address::c32; use stacks_common::address::AddressHashMode; use stacks_common::address::C32_ADDRESS_VERSION_MAINNET_SINGLESIG; use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index cb515b7563..6d231f55dd 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -16,17 +16,10 @@ use stacks_common::types::StacksEpochId; -use crate::vm::analysis::errors::CheckError; use crate::vm::ast::ASTRules; -use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; -use crate::vm::execute as vm_execute; +use crate::vm::errors::{CheckErrors, Error}; use crate::vm::tests::{execute, symbols_from_values}; -use crate::vm::types::{ - PrincipalData, QualifiedContractIdentifier, ResponseData, TypeSignature, Value, -}; -use std::convert::TryInto; +use crate::vm::types::{PrincipalData, QualifiedContractIdentifier, Value}; use crate::vm::tests::env_factory; use crate::vm::tests::test_clarity_versions; @@ -1468,12 +1461,8 @@ fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvi } } 
-#[apply(test_clarity_versions)] -fn test_embedded_trait( - version: ClarityVersion, - epoch: StacksEpochId, - mut env_factory: MemoryEnvironmentGenerator, -) { +#[apply(test_epochs)] +fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { if epoch < StacksEpochId::Epoch21 { return; } From 3b43af6771101a56d4a5b4ab0b51ac2d87d7b072 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 23 Jun 2023 16:38:41 -0400 Subject: [PATCH 158/158] Clippy changes Signed-off-by: Jacinta Ferrant --- clarity/src/vm/analysis/arithmetic_checker/tests.rs | 1 - clarity/src/vm/tests/contracts.rs | 1 - clarity/src/vm/tests/mod.rs | 2 +- clarity/src/vm/tests/sequences.rs | 2 +- clarity/src/vm/tests/traits.rs | 4 +--- src/chainstate/coordinator/tests.rs | 2 +- src/clarity_vm/special.rs | 2 +- src/clarity_vm/tests/analysis_costs.rs | 4 +--- src/clarity_vm/tests/ast.rs | 2 +- src/clarity_vm/tests/large_contract.rs | 2 +- 10 files changed, 8 insertions(+), 14 deletions(-) diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 2d1064cedc..819e1e86b7 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -34,7 +34,6 @@ use crate::vm::types::QualifiedContractIdentifier; use crate::vm::variables::NativeVariables; use crate::vm::ClarityVersion; - /// Checks whether or not a contract only contains arithmetic expressions (for example, defining a /// map would not pass this check). /// This check is useful in determining the validity of new potential cost functions. diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 1ddc280796..f230b39a25 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
- #[cfg(any(test, feature = "testing"))] use rstest::rstest; #[cfg(any(test, feature = "testing"))] diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 5cfa1df60e..518b063e41 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -170,4 +170,4 @@ pub fn test_only_mainnet_to_chain_id(mainnet: bool) -> u32 { } else { CHAIN_ID_TESTNET } -} \ No newline at end of file +} diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index d9e7dc9d61..6864223fd8 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -26,9 +26,9 @@ use stacks_common::types::StacksEpochId; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::tests::test_clarity_versions; +use crate::vm::types::signatures::ListTypeData; use crate::vm::types::signatures::SequenceSubtype::{BufferType, StringType}; use crate::vm::types::signatures::StringSubtype::ASCII; -use crate::vm::types::signatures::ListTypeData; use crate::vm::types::BufferLength; use crate::vm::{execute, execute_v2, ClarityVersion}; diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index cd2511cb17..7cc22b7a07 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -22,9 +22,7 @@ use crate::vm::ast::ASTRules; use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute as vm_execute; -use crate::vm::tests::{ - execute, symbols_from_values, -}; +use crate::vm::tests::{execute, symbols_from_values}; use crate::vm::types::{ PrincipalData, QualifiedContractIdentifier, ResponseData, TypeSignature, Value, }; diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index a62b6ff9a5..7557156e83 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -3296,7 +3296,7 @@ fn test_sbtc_ops() { sunset_ht, 
pox_v1_unlock_ht, pox_v2_unlock_ht, - u32::MAX + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index cc31484aaf..eddb3dacae 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -42,8 +42,8 @@ use stacks_common::util::hash::Hash160; use crate::chainstate::stacks::address::PoxAddress; use crate::core::StacksEpochId; -use crate::vm::costs::runtime_cost; use crate::vm::ast::ASTRules; +use crate::vm::costs::runtime_cost; /// Parse the returned value from PoX `stack-stx` and `delegate-stack-stx` functions /// from pox-2.clar or pox-3.clar into a format more readily digestible in rust. diff --git a/src/clarity_vm/tests/analysis_costs.rs b/src/clarity_vm/tests/analysis_costs.rs index 1d84a87ddb..f9ffeef1c6 100644 --- a/src/clarity_vm/tests/analysis_costs.rs +++ b/src/clarity_vm/tests/analysis_costs.rs @@ -27,9 +27,7 @@ use clarity::vm::functions::NativeFunctions; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; use clarity::vm::tests::test_only_mainnet_to_chain_id; -use clarity::vm::tests::{ - execute, symbols_from_values, UnitTestBurnStateDB, -}; +use clarity::vm::tests::{execute, symbols_from_values, UnitTestBurnStateDB}; use clarity::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, }; diff --git a/src/clarity_vm/tests/ast.rs b/src/clarity_vm/tests/ast.rs index 76b84cac15..64adebd271 100644 --- a/src/clarity_vm/tests/ast.rs +++ b/src/clarity_vm/tests/ast.rs @@ -1,7 +1,7 @@ use clarity::vm::ast::build_ast; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; -use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::tests::test_clarity_versions; +use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::version::ClarityVersion; #[cfg(test)] use rstest::rstest; diff --git 
a/src/clarity_vm/tests/large_contract.rs b/src/clarity_vm/tests/large_contract.rs index 444ab0ba72..2b2ca1694c 100644 --- a/src/clarity_vm/tests/large_contract.rs +++ b/src/clarity_vm/tests/large_contract.rs @@ -27,6 +27,7 @@ use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::*; +use clarity::vm::tests::test_clarity_versions; use clarity::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TypeSignature, Value, @@ -34,7 +35,6 @@ use clarity::vm::types::{ use clarity::vm::version::ClarityVersion; use clarity::vm::ContractContext; use clarity::vm::MAX_CALL_STACK_DEPTH; -use clarity::vm::tests::test_clarity_versions; #[cfg(test)] use rstest::rstest;