diff --git a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests index 7eec85b5e6..231e7ddb13 100644 --- a/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests +++ b/.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests @@ -1,4 +1,4 @@ -FROM stacks-node:integrations AS test +FROM stacks-blockchain:integrations AS test ARG test_name ENV BITCOIND_TEST 1 diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary new file mode 100644 index 0000000000..8c450a67f3 --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -0,0 +1,23 @@ +FROM --platform=${TARGETPLATFORM} alpine as builder +# Use a small image to download and extract the release archive + +ARG TAG +ARG BIN_ARCH +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETARCH +ARG TARGETVARIANT +ARG REPO=stacks-network/stacks-blockchain + +RUN case ${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + *) exit 1 ;; \ + esac \ + && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ + && unzip ${BIN_ARCH}.zip -d /out + +FROM --platform=${TARGETPLATFORM} alpine +COPY --from=builder /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary new file mode 100644 index 0000000000..cf1380361b --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -0,0 +1,23 @@ +FROM --platform=${TARGETPLATFORM} alpine as builder +# Use a small image to download and extract the release archive + +ARG TAG +ARG BIN_ARCH +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETARCH +ARG TARGETVARIANT +ARG REPO=stacks-network/stacks-blockchain + +RUN case 
${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + *) exit 1 ;; \ + esac \ + && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ + && unzip ${BIN_ARCH}.zip -d /out + +FROM --platform=${TARGETPLATFORM} debian:bullseye +COPY --from=builder /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source new file mode 100644 index 0000000000..bbae34c2d5 --- /dev/null +++ b/.github/actions/dockerfiles/Dockerfile.debian-source @@ -0,0 +1,24 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out + +FROM --platform=${TARGETPLATFORM} debian:bullseye +COPY --from=build /out/stacks-node /bin/ +CMD ["stacks-node", "mainnet"] diff --git a/.github/actions/docsgen/Dockerfile.docsgen b/.github/actions/docsgen/Dockerfile.docsgen index 61c95fb70a..925587c71c 100644 --- a/.github/actions/docsgen/Dockerfile.docsgen +++ b/.github/actions/docsgen/Dockerfile.docsgen @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:bullseye as build WORKDIR /src diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml new file mode 100644 index 0000000000..c3864d5659 --- /dev/null +++ b/.github/workflows/audit.yml @@ -0,0 +1,35 @@ +## +## Performs an audit for crate advisories against cargo dependencies +## + +name: Security Audit + +# Only run when: +# - workflow is manually triggered +# - Cargo.toml/lock is changed +# - Daily at 0330 UTC +# Note: this will create issues for any crate advisories unless they already exist + +on: + workflow_dispatch: + push: + paths: + - "**/Cargo.toml" + - "**/Cargo.lock" + schedule: + - cron: 30 03 * * * + +jobs: + security_audit: + if: ${{ false }} + name: Crate Vulnerability Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Rust Dependency Check + id: rust_dep_check + uses: actions-rs/audit-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 8f22fe7249..698b1dec41 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -1,11 +1,17 @@ -name: stacks-bitcoin-integration-tests +## +## Bitcoin Integration Tests +## + +name: Bitcoin Integration Tests # Only run when: -# - PRs are opened -# - the workflow is started from the UI +# - PRs are (re)opened against master branch + on: pull_request: - workflow_dispatch: + types: + - opened + - reopened concurrency: group: stacks-bitcoin-integration-tests-${{ github.ref }} @@ -13,26 
+19,35 @@ concurrency: cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: + # Create bitcoin image used for later tests build-integration-image: + name: Build Image runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Build bitcoin integration testing image + id: build_docker_image env: DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info + # Remove .dockerignore file so codecov has access to git info and build the image run: | rm .dockerignore - docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests -t stacks-node:integrations . + docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.generic.bitcoin-tests -t stacks-blockchain:integrations . - name: Export docker image as tarball - run: docker save -o integration-image.tar stacks-node:integrations + id: export_docker_image + run: docker save -o integration-image.tar stacks-blockchain:integrations - name: Upload built docker image - uses: actions/upload-artifact@v2 + id: upload_docker_image + uses: actions/upload-artifact@v3 with: name: integration-image.tar path: integration-image.tar + # Run integration tests using sampled genesis block sampled-genesis: + name: Sampled Genesis runs-on: ubuntu-latest needs: - build-integration-image @@ -101,28 +116,43 @@ jobs: - tests::epoch_21::test_v1_unlock_height_with_current_stackers - tests::epoch_21::test_v1_unlock_height_with_delay_and_current_stackers - tests::epoch_21::trait_invocation_cross_epoch + - tests::epoch_22::pox_2_unlock_all + - tests::epoch_22::disable_pox + - tests::epoch_22::test_pox_reorg_one_flap + - tests::epoch_23::trait_invocation_behavior - tests::neon_integrations::bad_microblock_pubkey + - tests::epoch_24::fix_to_pox_contract + - tests::epoch_24::verify_auto_unlock_behavior steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: 
actions/checkout@v3 - name: Download docker image - uses: actions/download-artifact@v2 + id: download_docker_image + uses: actions/download-artifact@v3 with: name: integration-image.tar - name: Load docker image + id: load_docker_image run: docker load -i integration-image.tar && rm integration-image.tar - name: All integration tests with sampled genesis + id: bitcoin_integration_tests timeout-minutes: 30 env: DOCKER_BUILDKIT: 1 TEST_NAME: ${{ matrix.test-name }} run: docker build -o coverage-output --build-arg test_name=${{ matrix.test-name }} -f ./.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests . - - uses: codecov/codecov-action@v2 + - name: Code Coverage + id: code_coverage + uses: codecov/codecov-action@v3 with: files: ./coverage-output/lcov.info name: ${{ matrix.test-name }} fail_ci_if_error: false + + # Run atlas integration tests atlas-test: - if: ${{ true }} + name: Atlas Test runs-on: ubuntu-latest needs: - build-integration-image @@ -133,20 +163,27 @@ jobs: - tests::neon_integrations::atlas_integration_test - tests::neon_integrations::atlas_stress_integration_test steps: - - uses: actions/checkout@v2 + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Download docker image - uses: actions/download-artifact@v2 + id: download_docker_image + uses: actions/download-artifact@v3 with: name: integration-image.tar - name: Load docker image + id: load_docker_image run: docker load -i integration-image.tar && rm integration-image.tar - name: Atlas integration tests + id: atlas_integration_tests timeout-minutes: 40 env: DOCKER_BUILDKIT: 1 TEST_NAME: ${{ matrix.test-name }} run: docker build -o coverage-output --build-arg test_name=${{ matrix.test-name }} -f ./.github/actions/bitcoin-int-tests/Dockerfile.bitcoin-tests . 
- - uses: codecov/codecov-action@v2 + - name: Code Coverage + id: code_coverage + uses: codecov/codecov-action@v3 with: files: ./coverage-output/lcov.info name: ${{ matrix.test-name }} diff --git a/.github/workflows/build-source-binary.yml b/.github/workflows/build-source-binary.yml new file mode 100644 index 0000000000..284171d672 --- /dev/null +++ b/.github/workflows/build-source-binary.yml @@ -0,0 +1,65 @@ +## +## Builds binary assets of stacks-blockchain and creates a named tag github (draft) release +## + +name: Build Distributable Assets + +# Only run when: +# - manually triggered via the ci.yml workflow with a provided input tag + +on: + workflow_call: + inputs: + tag: + description: "Tag name of this release (x.y.z)" + required: true + type: string + parallel_jobs: + description: "Number of parallel binary builds" + required: false + type: number + default: 4 + arch: + description: "Stringified JSON object listing of platform matrix" + required: true + type: string + +jobs: + artifact: + if: ${{ inputs.tag != '' }} + name: Create Artifacts + runs-on: ubuntu-latest + strategy: + max-parallel: ${{ inputs.parallel_jobs }} + matrix: + platform: ${{ fromJson(inputs.arch) }} + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up Docker Buildx + id: setup_buildx + uses: docker/setup-buildx-action@v2 + - name: Build Binaries + id: build_binaries + uses: docker/build-push-action@v3 + with: + file: build-scripts/Dockerfile.${{ matrix.platform }} + outputs: type=local,dest=./release/${{ matrix.platform }} + build-args: | + OS_ARCH=${{ matrix.platform }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + - name: Compress artifact + id: compress_artifact + run: zip --junk-paths ${{ matrix.platform }} ./release/${{ 
matrix.platform }}/* + - name: Upload artifact + id: upload_artifact + uses: actions/upload-artifact@v3 + with: + path: ${{ matrix.platform }}.zip diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d1a04a194f..5b72c9faf3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,10 +1,14 @@ name: CI -# Only run when: -# - PRs are opened against the master branch -# - the workflow is started from the UI (an optional tag can be passed in via parameter) -# - If the optional tag parameter is passed in, a new tag will be generated based off the selected branch +## Only run when: +## - manually triggered +## - PR's are (re)opened +## - push to master (i.e. merge develop -> master) + on: + push: + branches: + - master pull_request: workflow_dispatch: inputs: @@ -13,318 +17,164 @@ on: required: false concurrency: - group: stacks-blockchain-${{ github.ref }} - # Only cancel in progress if this is for a PR - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + group: ${{ github.head_ref || github.run_id }} + cancel-in-progress: true jobs: - # Run full genesis test - full-genesis: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Single full genesis integration test - env: - DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info - run: | - rm .dockerignore - docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.large-genesis . - - uses: codecov/codecov-action@v2 - with: - files: ./coverage-output/lcov.info - name: large_genesis - fail_ci_if_error: false - - # Run unit tests with code coverage - unit-tests: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run units tests (with coverage) - env: - DOCKER_BUILDKIT: 1 - # Remove .dockerignore file so codecov has access to git info - run: | - rm .dockerignore - docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.code-cov . 
- - uses: codecov/codecov-action@v2 - with: - files: ./coverage-output/lcov.info - name: unit_tests - fail_ci_if_error: false - - open-api-validation: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run units tests (with coverage) - env: - DOCKER_BUILDKIT: 1 - run: docker build -o dist/ -f .github/actions/open-api/Dockerfile.open-api-validate . - - name: Upload bundled html - uses: actions/upload-artifact@v2 - with: - name: open-api-bundle - path: | - dist - # Run net-tests - nettest: - # disable this job/test for now, since we haven't seen this pass - # on github actions in a while, and the failures can take > 4 hours - if: ${{ false }} - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Run network relay tests - env: - DOCKER_BUILDKIT: 1 - run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.net-tests . - - core-contracts-clarinet-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: "Execute core contract unit tests in Clarinet" - uses: docker://hirosystems/clarinet:1.1.0 - with: - args: test --coverage --manifest-path=./contrib/core-contract-tests/Clarinet.toml - - name: "Export code coverage" - uses: codecov/codecov-action@v1 - with: - files: ./coverage.lcov - verbose: true - fail_ci_if_error: false - - # rustfmt checking + ## rust format: Execute on every run rustfmt: + name: Rust Format runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Run rustfmt check - env: - DOCKER_BUILDKIT: 1 - run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.rustfmt . 
- - # Create distributions - dist: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: rustfmt + - name: Rustfmt + id: rustfmt + uses: actions-rust-lang/rustfmt@v1 + + ## Release tests: Execute on every run + release-tests: + name: Release Tests + uses: stacks-network/stacks-blockchain/.github/workflows/stacks-blockchain-tests.yml@master + + ## Checked for leaked credentials: Execute on every run + leaked-cred-test: + name: Leaked Credential Test runs-on: ubuntu-latest - strategy: - matrix: - platform: - [ - windows-x64, - macos-x64, - macos-arm64, - linux-x64, - linux-musl-x64, - linux-armv7, - linux-arm64, - ] - steps: - - uses: actions/checkout@v2 - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Build distributable - uses: docker/build-push-action@v2 - with: - file: build-scripts/Dockerfile.${{ matrix.platform }} - outputs: dist/${{ matrix.platform }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - - - name: Compress artifact - run: zip --junk-paths ${{ matrix.platform }} ./dist/${{ matrix.platform }}/* - - - name: Upload artifact - uses: actions/upload-artifact@v2 - with: - name: ${{ matrix.platform }} - path: ${{ matrix.platform }}.zip - -# call-docker-platforms-workflow: -# if: ${{ github.event.inputs.tag != '' }} -# uses: stacks-network/stacks-blockchain/.github/workflows/docker-platforms.yml@master -# with: -# tag: ${{ 
github.event.inputs.tag }} -# secrets: -# DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} -# DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} - - # Build docker image, tag it with the git tag and `latest` if running on master branch, and publish under the following conditions - # Will publish if: - # - a tag was passed into this workflow - # - a tag was pushed up - # - this workflow was invoked against a non-master branch (a Docker image tag with the name of the branch will be published) - build-publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - ${{ github.event.inputs.tag }} - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: linux/amd64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - # Only push if (a tag was passed in) or (we're building a non-master branch which isn't a PR) - push: ${{ github.event.inputs.tag != '' || (github.ref != 'refs/heads/master' && !contains(github.ref, 'refs/pull')) }} - - # Build docker image, tag it with the git tag and `latest` if running on master branch, and publish under the following conditions - # Will publish if: - # - a tag was passed into this workflow - # - a tag was pushed up - # - this workflow was invoked against a non-master branch (a Docker image 
tag with the name of the branch will be published) - build-publish-stretch: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - # Stretch tag will be "-stretch" if a tag was passed in, otherwise "-stretch". If the BRANCH is master, will result in "latest-stretch" - # Also determines platforms to be build in docker step - - name: Determine Stretch Tag - run: | - if [[ -z ${TAG} ]]; then - REF=$(echo ${GITHUB_REF#refs/*/} | tr / -) - if [[ "${REF}" == "master" ]]; then - echo "STRETCH_TAG=latest-stretch" >> $GITHUB_ENV - else - echo "STRETCH_TAG=${REF}-stretch" >> $GITHUB_ENV - fi - else - echo "STRETCH_TAG=${TAG}-stretch" >> $GITHUB_ENV - fi - env: - TAG: ${{ github.event.inputs.tag }} - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - type=ref,event=pr - ${{ env.STRETCH_TAG }} - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: linux/amd64 - file: Dockerfile.stretch - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ github.event.inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - # Only push if (a tag was passed in) or (we're building a non-master branch which isn't a PR) - push: ${{ github.event.inputs.tag != '' || (github.ref != 'refs/heads/master' && !contains(github.ref, 'refs/pull')) }} - - # Create a new release if we're building a tag - create-release: - runs-on: ubuntu-latest - if: ${{ github.event.inputs.tag != '' }} - outputs: - 
upload_url: ${{ steps.create_release.outputs.upload_url }} + - name: Extract branch name + id: extract_branch + if: ${{ github.event_name != 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + - name: Extract branch name + id: extract_branch_pr + if: ${{ github.event_name == 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF})" >> $GITHUB_ENV + - name: Branch name + run: echo running on branch ${{ env.BRANCH_NAME }} + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: TruffleHog Scan + id: trufflehog_check + uses: trufflesecurity/trufflehog@main + with: + path: ./ + base: ${{ env.BRANCH_NAME }} + head: HEAD + + ############################################### + ## Build Tagged Release + ############################################### + ## Build source binaries + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + build-source: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Build Binaries + uses: stacks-network/stacks-blockchain/.github/workflows/build-source-binary.yml@master needs: - - dist - - build-publish - - build-publish-stretch - - steps: - - name: Create Release - id: create_release - uses: actions/create-release@v1 - env: - # Use custom secrets.GH_TOKEN instead of default secrets.GITHUB_TOKEN because the custom token will trigger the - # clarity-js-sdk-pr workflow. As events caused by default tokens do not trigger subsequent workflow runs to avoid loops. 
- GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - with: - tag_name: ${{ github.event.inputs.tag || github.ref }} - release_name: Release ${{ github.event.inputs.tag || github.ref }} - draft: false - prerelease: true - - # Upload distributables to a new release if we're building a tag or a tag was passed in - upload-dist: - runs-on: ubuntu-latest - if: ${{ github.event.inputs.tag != '' }} + - rustfmt + - release-tests + - leaked-cred-test + with: + tag: ${{ inputs.tag }} + parallel_jobs: 4 + arch: >- + ["linux-glibc-x64", "linux-musl-x64", "linux-glibc-arm64", "linux-musl-arm64", "macos-x64", "macos-arm64", "windows-x64"] + + ## Create github release with binary archives + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + github-release: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Github Release + uses: stacks-network/stacks-blockchain/.github/workflows/github-release.yml@master + needs: build-source + with: + tag: ${{ inputs.tag }} + arch: >- + ["linux-glibc-x64", "linux-musl-x64", "linux-glibc-arm64", "linux-musl-arm64", "macos-x64", "macos-arm64", "windows-x64"] + secrets: + GH_TOKEN: ${{ secrets.GH_TOKEN }} + + ## Create docker alpine images + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + docker-alpine: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Docker Alpine (Binary) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-alpine-binary.yml@master + needs: github-release + with: + tag: ${{ inputs.tag }} + docker_platforms: linux/arm64, linux/amd64, linux/amd64/v2, linux/amd64/v3 + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + + ## 
Create docker debian images + ## Only run if: + ## - Tag is provided + ## - OR + ## - Not the default branch + ## - AND + ## - Not a PR + docker-debian: + if: ${{ inputs.tag != '' || (github.ref != format('refs/heads/{0}', github.event.repository.default_branch) && !contains(github.ref, 'refs/pull')) }} + name: Docker Debian (Binary) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-debian-binary.yml@master + needs: github-release + with: + tag: ${{ inputs.tag }} + docker_platforms: linux/amd64, linux/amd64/v2, linux/amd64/v3 + linux_version: debian + build_type: binary + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + + ############################################### + ## Build Branch/PR + ############################################### + ## Create docker debian images + ## Only run if: + ## - Tag is *not* provided + build-branch: + if: ${{ inputs.tag == '' }} + name: Docker Debian (Source) + uses: stacks-network/stacks-blockchain/.github/workflows/image-build-debian-source.yml@master needs: - - create-release - strategy: - matrix: - platform: - [ - windows-x64, - macos-x64, - macos-arm64, - linux-x64, - linux-musl-x64, - linux-armv7, - linux-arm64, - ] - - steps: - - uses: actions/checkout@v2 - - name: Download distro - uses: actions/download-artifact@v2 - with: - name: ${{ matrix.platform }} - path: dist/ - - - name: Upload Release Asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.create-release.outputs.upload_url }} - asset_path: ./dist/${{ matrix.platform }}.zip - asset_name: ${{ matrix.platform }}.zip - asset_content_type: application/zip + - rustfmt + - leaked-cred-test + with: + docker_platforms: linux/amd64 + linux_version: debian + build_type: source + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git 
a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml index fd28738cf1..9ac0956a85 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -5,7 +5,7 @@ name: Open Clarity JS SDK PR env: - CLARITY_JS_SDK_REPOSITORY: blockstack/clarity-js-sdk + CLARITY_JS_SDK_REPOSITORY: stacks-network/clarity-js-sdk COMMIT_USER: Hiro DevOps COMMIT_EMAIL: 45208873+blockstack-devops@users.noreply.github.com on: @@ -16,28 +16,30 @@ on: jobs: run: + name: Open Clarity JS SDK PR runs-on: ubuntu-latest # This condition can be removed once the main `stacks-blockchain` workflow creates pre-releases # when appropriate, instead of full releases for every tag passed in. if: "!contains(github.ref, '-rc')" steps: - name: Checkout latest clarity js sdk - uses: actions/checkout@v2 + id: git_checkout + uses: actions/checkout@v3 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} ref: master - - name: Determine Release Version + id: get_release_version run: | RELEASE_VERSION=$(echo ${GITHUB_REF#refs/*/} | tr / -) echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_ENV - - name: Update SDK Tag + id: update_sdk_tag run: sed -i "s@CORE_SDK_TAG = \".*\"@CORE_SDK_TAG = \"$RELEASE_VERSION\"@g" packages/clarity-native-bin/src/index.ts - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + id: create_pr + uses: peter-evans/create-pull-request@v4 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" @@ -49,7 +51,7 @@ jobs: labels: | dependencies body: | - :robot: This is an automated pull request created from a new release in [stacks-blockchain](https://github.com/blockstack/stacks-blockchain/releases). + :robot: This is an automated pull request created from a new release in [stacks-blockchain](https://github.com/stacks-network/stacks-blockchain/releases). Updates the clarity-native-bin tag. 
assignees: zone117x diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..1e6872bd69 --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,44 @@ +# Disabled - this workflow needs more work so it's not incredibly chatty +## +## Perform Clippy checks - currently set to defaults +## https://github.com/rust-lang/rust-clippy#usage +## https://rust-lang.github.io/rust-clippy/master/index.html +## + +name: Clippy Checks + +# Only run when: +# - PRs are (re)opened against develop branch + +on: + pull_request: + branches: + - develop + types: + - opened + - reopened + +jobs: + clippy_check: + if: ${{ false }} + name: Clippy Check + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Define Rust Toolchain + id: define_rust_toolchain + run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV + - name: Setup Rust Toolchain + id: setup_rust_toolchain + uses: actions-rust-lang/setup-rust-toolchain@v1 + with: + toolchain: ${{ env.RUST_TOOLCHAIN }} + components: clippy + - name: Clippy + id: clippy + uses: actions-rs/clippy-check@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + args: --all-features diff --git a/.github/workflows/docker-platforms.yml b/.github/workflows/docker-platforms.yml deleted file mode 100644 index 7ee44b3eda..0000000000 --- a/.github/workflows/docker-platforms.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: Build/Release Additional Docker Platform Images - -# Only run when: -# - the workflow is automatically triggered during a release with the relevant tag -# - the workflow is started from the UI with a tag -on: - workflow_call: - inputs: - tag: - required: true - type: string - secrets: - DOCKERHUB_USERNAME: - required: true - DOCKERHUB_PASSWORD: - required: true - -env: - BUILD_PLATFORMS: linux/arm64 - -jobs: - # Build docker image, tag it with the branch and docker image tag passed in, and publish - build-publish: - 
runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - type=ref,event=branch - ${{ inputs.tag }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: ${{ env.BUILD_PLATFORMS }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: true - - # Build docker image, tag it with the branch and docker image tag passed in, and publish - build-publish-stretch: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Determine Stretch Tag and build platforms - run: | - if [[ -z ${TAG} ]]; then - REF=$(echo ${GITHUB_REF#refs/*/} | tr / -) - if [[ "${REF}" == "master" ]]; then - echo "STRETCH_TAG=latest-stretch" >> $GITHUB_ENV - else - echo "STRETCH_TAG=${REF}-stretch" >> $GITHUB_ENV - fi - else - echo "STRETCH_TAG=${TAG}-stretch" >> $GITHUB_ENV - fi - env: - TAG: ${{ inputs.tag }} - - - name: Set Vars - run: | - echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV - echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Docker meta - id: meta - uses: docker/metadata-action@v3 - with: - images: | - blockstack/${{ github.event.repository.name }} - tags: | - 
type=ref,event=branch - ${{ env.STRETCH_TAG }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Build/Tag/Push Image - uses: docker/build-push-action@v2 - with: - platforms: ${{ env.BUILD_PLATFORMS }} - file: Dockerfile.stretch - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: true diff --git a/.github/workflows/docs-pr.yml b/.github/workflows/docs-pr.yml index ad293191b9..b2a44f7296 100644 --- a/.github/workflows/docs-pr.yml +++ b/.github/workflows/docs-pr.yml @@ -1,5 +1,5 @@ ## -## Github workflow for auto-opening a PR on the docs.blockstack repo +## Github workflow for auto-opening a PR on the stacks-network/docs repo ## whenever the auto-generated documentation here changes. ## ## It does this using a robot account `kantai-robot` to create a @@ -12,37 +12,44 @@ name: Open Docs PR env: ROBOT_OWNER: kantai-robot ROBOT_REPO: docs.blockstack - TARGET_OWNER: blockstack - TARGET_REPO: docs.blockstack - TARGET_REPOSITORY: blockstack/docs.blockstack + TARGET_OWNER: stacks-network + TARGET_REPO: docs + TARGET_REPOSITORY: stacks-network/docs + +# Only run when: +# - push to master + on: push: - branches: [master] + branches: + - master jobs: dist: + name: Open Docs PR runs-on: ubuntu-latest env: ROBOT_BRANCH: ${{ format('auto/clarity-ref-{0}', github.sha) }} steps: - - uses: actions/checkout@v2 - + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 - name: Build docs + id: build_docs env: DOCKER_BUILDKIT: 1 run: rm -rf docs-output && docker build -o docs-output -f ./.github/actions/docsgen/Dockerfile.docsgen . 
- - name: Checkout latest docs - uses: actions/checkout@v2 + id: git_checkout_docs + uses: actions/checkout@v3 with: token: ${{ secrets.DOCS_GITHUB_TOKEN }} repository: ${{ env.TARGET_REPOSITORY }} - path: docs.blockstack - + path: docs - name: Branch and commit id: push run: | - cd docs.blockstack + cd docs git config user.email "kantai+robot@gmail.com" git config user.name "PR Robot" git fetch --unshallow @@ -61,8 +68,9 @@ jobs: echo "::set-output name=open_pr::1" fi - name: Open PR + id: open_pr if: ${{ steps.push.outputs.open_pr == '1' }} - uses: actions/github-script@v2 + uses: actions/github-script@v6 with: github-token: ${{ secrets.DOCS_GITHUB_TOKEN }} script: | diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml new file mode 100644 index 0000000000..c0683f51df --- /dev/null +++ b/.github/workflows/github-release.yml @@ -0,0 +1,58 @@ +## +## Create the github release and store artifact files (with checksum) +## + +name: Github Release + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + arch: + description: "Stringified JSON object listing of platform matrix" + required: true + type: string + secrets: + GH_TOKEN: + required: true + +jobs: + create-release: + if: ${{ inputs.tag != '' }} + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Download Artifacts + id: download_artifacts + uses: actions/download-artifact@v3 + with: + name: artifact + path: release + # Generate a checksums file to be added to the release page + - name: Generate Checksums + id: generate_checksum + uses: jmgilman/actions-generate-checksum@v1 + with: + output: CHECKSUMS.txt + patterns: | + release/*.zip + # Upload the release archives with the checksums file + - name: Upload Release + id: upload_release + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + with: + name: Release ${{ github.event.inputs.tag || 
github.ref }} + tag_name: ${{ github.event.inputs.tag || github.ref }} + draft: false + prerelease: true + fail_on_unmatched_files: true + files: | + release/*.zip + CHECKSUMS.txt diff --git a/.github/workflows/image-build-alpine-binary.yml b/.github/workflows/image-build-alpine-binary.yml new file mode 100644 index 0000000000..f5dc992380 --- /dev/null +++ b/.github/workflows/image-build-alpine-binary.yml @@ -0,0 +1,81 @@ +## +## Build the Docker Alpine image from the pre-built downloaded binary asset +## + +name: Build Alpine Binary Image + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + description: "semver tag for alpine images" + docker_platforms: + required: true + description: "Arch to build alpine images" + type: string + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + # Only run if a tag is provided manually + if: ${{ inputs.tag != '' }} + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id: docker_qemu + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + # tag image with: + # latest: `latest` + # input tag: `` + # git tag: `1234` + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + blockstack/${{ github.event.repository.name }} + tags: | + type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} + type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' }} + type=ref,event=tag,enable=true + - name: Login to DockerHub + id: docker_login
+ uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.alpine-binary + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + TAG=${{ inputs.tag}} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/image-build-debian-binary.yml b/.github/workflows/image-build-debian-binary.yml new file mode 100644 index 0000000000..e1584abbc1 --- /dev/null +++ b/.github/workflows/image-build-debian-binary.yml @@ -0,0 +1,92 @@ +## +## Build the Docker Debian image from the pre-built downloaded binary asset +## + +name: Build Linux Binary Image + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + inputs: + tag: + required: true + type: string + description: "semver tag for linux images" + docker_platforms: + required: true + description: "Arch to build linux images" + type: string + linux_version: + required: true + description: "Linux image to build" + type: string + default: debian + build_type: + required: true + description: Build type (source/binary) + type: string + default: binary + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + # Only run if a tag is provided manually + if: ${{ inputs.tag != '' }} + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id:
docker_qemu + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + - name: Extract branch name + id: extract_branch + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + # tag image with: + # branch name: `latest-` + # input tag: `-` + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + blockstack/${{ github.event.repository.name }} + tags: | + type=raw,value=latest-${{ inputs.linux_version }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) )}} + type=raw,value=${{ inputs.tag }}-${{ inputs.linux_version }},enable=${{ inputs.tag != '' }} + - name: Login to DockerHub + id: docker_login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ inputs.linux_version }}-${{ inputs.build_type }} + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + TAG=${{ inputs.tag}} + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/image-build-debian-source.yml b/.github/workflows/image-build-debian-source.yml new file mode 100644 index 0000000000..d60166e26c --- /dev/null +++ b/.github/workflows/image-build-debian-source.yml @@ -0,0 +1,90 @@ +## +## Build the Docker Debian image from source +## + +name: Build Linux Source Image + +# Only run when: +# - workflow is manually triggered +# - manually triggered via the ci.yml workflow + +on: + workflow_dispatch: + workflow_call: + inputs: + docker_platforms: + required: true + 
description: "Arch to build images" + type: string + default: linux/amd64 + linux_version: + required: true + description: "Linux image to build" + type: string + default: debian + build_type: + required: true + description: Build type (source/binary) + type: string + default: source + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_PASSWORD: + required: true + +jobs: + image: + name: Build Image + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Set Vars + id: set_vars + run: | + echo "GITHUB_SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "GITHUB_REF_SHORT=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up QEMU + id: docker_qemu + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + id: docker_buildx + uses: docker/setup-buildx-action@v2 + - name: Extract branch name + id: extract_branch + if: ${{ github.event_name != 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_ENV + - name: Extract branch name (PR) + id: extract_branch_pr + if: ${{ github.event_name == 'pull_request' }} + run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF})" >> $GITHUB_ENV + - name: Docker Metadata + id: docker_metadata + uses: docker/metadata-action@v4 + with: + images: | + blockstack/${{ github.event.repository.name }} + tags: | + type=raw,value=${{ env.BRANCH_NAME }} + type=ref,event=pr + - name: Login to DockerHub + id: docker_login + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build and Push + id: docker_build + uses: docker/build-push-action@v3 + with: + file: ./.github/actions/dockerfiles/Dockerfile.${{ inputs.linux_version }}-${{ inputs.build_type }} + platforms: ${{ inputs.docker_platforms }} + tags: ${{ steps.docker_metadata.outputs.tags }} + labels: ${{ steps.docker_metadata.outputs.labels }} + build-args: | + STACKS_NODE_VERSION=${{
env.GITHUB_SHA_SHORT }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + push: true diff --git a/.github/workflows/stacks-blockchain-tests.yml b/.github/workflows/stacks-blockchain-tests.yml new file mode 100644 index 0000000000..fb1dffc1ae --- /dev/null +++ b/.github/workflows/stacks-blockchain-tests.yml @@ -0,0 +1,117 @@ +## +## Run tests for tagged releases +## + +name: Tests + +# Only run when: +# - manually triggered via the ci.yml workflow + +on: + workflow_call: + +jobs: + # Run full genesis test + full-genesis: + name: Full Genesis Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Single full genesis integration test + id: full_genesis_test + env: + DOCKER_BUILDKIT: 1 + # Remove .dockerignore file so codecov has access to git info + run: | + rm .dockerignore + docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.large-genesis . + - name: Large Genesis Codecov + id: full_genesis_codecov + uses: codecov/codecov-action@v3 + with: + files: ./coverage-output/lcov.info + name: large_genesis + fail_ci_if_error: false + + # Run unit tests with code coverage + unit-tests: + name: Unit Tests + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run unit tests (with coverage) + id: unit_tests_codecov + env: + DOCKER_BUILDKIT: 1 + # Remove .dockerignore file so codecov has access to git info + run: | + rm .dockerignore + docker build -o coverage-output -f ./.github/actions/bitcoin-int-tests/Dockerfile.code-cov . 
+ - name: Run unit tests + id: codecov + uses: codecov/codecov-action@v3 + with: + files: ./coverage-output/lcov.info + name: unit_tests + fail_ci_if_error: false + + open-api-validation: + name: OpenAPI Validation + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run unit tests (with coverage) + id: api_codecov + env: + DOCKER_BUILDKIT: 1 + run: docker build -o dist/ -f .github/actions/open-api/Dockerfile.open-api-validate . + - name: Upload bundled html + id: upload_html_artifact + uses: actions/upload-artifact@v3 + with: + name: open-api-bundle + path: | + dist + + # Run net-tests + nettest: + # disable this job/test for now, since we haven't seen this pass + # on github actions in a while, and the failures can take > 4 hours + if: ${{ false }} + name: Net-Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Run network relay tests + id: nettest + env: + DOCKER_BUILDKIT: 1 + run: docker build -f ./.github/actions/bitcoin-int-tests/Dockerfile.net-tests . + + # Core contract tests + core-contracts-clarinet-test: + name: Core Contracts Test + runs-on: ubuntu-latest + steps: + - name: Checkout the latest code + id: git_checkout + uses: actions/checkout@v3 + - name: Execute core contract unit tests in Clarinet + id: clarinet_unit_test + uses: docker://hirosystems/clarinet:1.1.0 + with: + args: test --coverage --manifest-path=./contrib/core-contract-tests/Clarinet.toml + - name: Export code coverage + id: clarinet_codecov + uses: codecov/codecov-action@v3 + with: + files: ./coverage.lcov + verbose: true diff --git a/CHANGELOG.md b/CHANGELOG.md index c28a30c5a3..5c9ad7b314 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,32 +5,178 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [Unreleased - Stacks 2.1] +## [2.4.0.0.0] +This is a **consensus-breaking** release to revert consensus to PoX, and is the second fork proposed in SIP-022. -This release will contain consensus-breaking changes. +- [SIP-022](https://github.com/stacksgov/sips/blob/main/sips/sip-022/sip-022-emergency-pox-fix.md) +- [SIP-024](https://github.com/stacksgov/sips/blob/main/sips/sip-024/sip-024-least-supertype-fix.md) + +### Fixed +- PoX is re-enabled and stacking resumes starting at Bitcoin block `791551` +- Peer network id is updated to `0x18000009` +- Adds the type sanitization described in SIP-024 + +This release is compatible with chainstate directories from 2.1.0.0.x and 2.3.0.0.x + +## [2.3.0.0.2] + +This is a high-priority hotfix release to address a bug in the +stacks-node miner logic which could impact miner availability. + +This release is compatible with chainstate directories from 2.3.0.0.x and 2.1.0.0.x + +## [2.3.0.0.1] + +This is a hotfix release to update: +- peer version identifier used by the stacks-node p2p network. +- yield interpreter errors in deser_hex + +This release is compatible with chainstate directories from 2.3.0.0.x and 2.1.0.0.x + +## [2.3.0.0.0] + +This is a **consensus-breaking** release to address a Clarity VM bug discovered in 2.2.0.0.1. +Tx and read-only calls to functions with traits as parameters are rejected with unchecked TypeValueError. +Additional context and rationale can be found in [SIP-023](https://github.com/stacksgov/sips/blob/main/sips/sip-023/sip-023-emergency-fix-traits.md). + +This release is compatible with chainstate directories from 2.1.0.0.x. + +## [2.2.0.0.1] + +This is a **consensus-breaking** release to address a bug and DoS vector in pox-2's `stack-increase` function. 
+Additional context and rationale can be found in [SIP-022](https://github.com/stacksgov/sips/blob/main/sips/sip-022/sip-022-emergency-pox-fix.md). + +This release is compatible with chainstate directories from 2.1.0.0.x. + +## [2.1.0.0.3] + +This is a high-priority hotfix release to address a bug in the +stacks-node miner logic which could impact miner availability. This +release's chainstate directory is compatible with chainstate +directories from 2.1.0.0.2. + +## [2.1.0.0.2] + +This software update is a hotfix to resolve improper unlock handling +in mempool admission. This release's chainstate directory is +compatible with chainstate directories from 2.1.0.0.1. + +### Fixed + +- Fix mempool admission logic's improper handling of PoX unlocks. This would + cause users to get spurious `NotEnoughFunds` rejections when trying to submit + their transactions (#3623) + +## [2.1.0.0.1] + +### Fixed + +- Handle the case where a bitcoin node returns zero headers (#3588) +- The default value for `always_use_affirmation_maps` is now set to `false`, + instead of `true`. This was preventing testnet nodes from reaching the chain + tip with the default configuration. +- Reduce default poll time of the `chain-liveness` thread which reduces the + possibility that a miner thread will get interrupted (#3610). + +## [2.1] + +This is a **consensus-breaking** release that introduces a _lot_ of new +functionality. Details on the how and why can be found in [SIP-015](https://github.com/stacksgov/sips/blob/feat/sip-015/sips/sip-015/sip-015-network-upgrade.md), +[SIP-018](https://github.com/MarvinJanssen/sips/blob/feat/signed-structured-data/sips/sip-018/sip-018-signed-structured-data.md), +and [SIP-20](https://github.com/obycode/sips/blob/bitwise-ops/sips/sip-020/sip-020-bitwise-ops.md). + +The changelog for this release is a high-level summary of these SIPs. ### Added -- Clarity function `stx-transfer?` now takes a 4th optional argument, which is a memo. 
-- Added a new parser which will be used to parse Clarity code beginning with 2.1, - resolving several bugs in the old parser and improving performance. -- Documentation will indicate explicitly which Clarity version introduced each - keyword or function. -- Clarity2 improvements to traits (see #3251 for details): +- There is a new `.pox-2` contract for implementing proof-of-transfer. This PoX + contract enables re-stacking while the user's STX are locked, and incrementing +the amount stacked on top of a locked batch of STX. +- The Clarity function `stx-account` has been added, which returns the account's + locked and unlocked balances. +- The Clarity functions `principal-destruct` and `principal-construct?` + functions have been added, which provide the means to convert between a +`principal` instance and the `buff`s and `string-ascii`s that constitute it. +- The Clarity function `get-burn-block-info?` has been added to support + fetching the burnchain header hash of _any_ burnchain block starting from the +sortition height of the Stacks genesis block, and to support fetching the PoX +addresses and rewards paid by miners for a particular burnchain block height. +- The Clarity function `slice` has been added for obtaining a sub-sequence of a + `buff`, `string-ascii`, `string-utf8`, or `list`. +- Clarity functions for converting between `string-ascii`, `string-utf8`, + `uint`, and `int` have been added. +- Clarity functions for converting between big- and little-endian +`buff` representations of `int` and `uint` have been added. +- The Clarity function `stx-transfer-memo?` has been added, which behaves the + same as `stx-transfer?` but also takes a memo argument. +- The Clarity function `is-standard` has been added to identify whether or not a + `principal` instance is a standard or contract principal. +- Clarity functions have been added for converting an arbitrary Clarity type to + and from its canonical byte string representation. 
+- The Clarity function `replace-at?` has been added for replacing a single item + in a `list`, `string-ascii`, `string-utf8`, or `buff`. +- The Clarity global variable `tx-sponsor?` has been added, which evaluates to + the sponsor of the transaction if the transaction is sponsored. +- The Clarity global variable `chain-id` has been added, which evaluates to the + 4-byte chain ID of this Stacks network. +- The Clarity parser has been rewritten to be about 3x faster than the parser in + Stacks 2.05.x.x.x. +- Clarity trait semantics have been refined and made more explicit, so as to + avoid certain corner cases where a trait reference might be downgraded to a +`principal` in Clarity 1. * Trait values can be passed to compatible sub-trait types * Traits can be embedded in compound types, e.g. `(optional )` * Traits can be assigned to a let-variable - Fixes to unexpected behavior in traits - * A trait with duplicate function names is now an error (#3214) - * Aliased trait names do not interfere with local trait definitions (#3215) + * A trait with duplicate function names is now an error + * Aliased trait names do not interfere with local trait definitions +- The comparison functions `<`, `<=`, `>`, and `>=` now work on `string-ascii`, + `string-utf8`, and `buff` based on byte-by-byte comparison (note that this is +_not_ lexicographic comparison). +- It is now possible to call `delegate-stx` from a burnchain transaction, just + as it is for `stack-stx` and `transfer-stx`. -## Upcoming +### Changed -### Added -- Added prometheus output for "transactions in last block" (#3138). -- Added envrionement variable `STACKS_LOG_FORMAT_TIME` to set the time format - stacks-node uses for logging. - Example: `STACKS_LOG_FORMAT_TIME="%Y-%m-%d %H:%M:%S" cargo stacks-node` +- The `delegate-stx` function in `.pox-2` can be called while the user's STX are + locked. 
+- If a batch of STX is not enough to clinch even a single reward slot, then the + STX are automatically unlocked at the start of the reward cycle in which they +are rendered useless in this capacity. +- The PoX sunset has been removed. PoX rewards will continue in perpetuity. +- Support for segwit and taproot addresses (v0 and v1 witness programs) has been + added for Stacking. +- The Clarity function `get-block-info?` now supports querying a block's total + burnchain spend by miners who tried to mine it, the spend by the winner, and +the total block reward (coinbase plus transaction fees). +- A block's coinbase transaction may specify an alternative recipient principal, + which can be either a standard or contract principal. +- A smart contract transaction can specify which version of Clarity to use. If + no version is given, then the epoch-default version will be used (in Stacks +2.1, this is Clarity 2). +- The Stacks node now includes the number of PoX anchor blocks in its + fork-choice rules. The best Stacks fork is the fork that (1) is on the best +Bitcoin fork, (2) has the most PoX anchor blocks known, and (3) is the longest. +- On-burnchain operations -- `stack-stx`, `delegate-stx`, and `transfer-stx` -- + can take effect within six (6) burnchain blocks in which they are mined, +instead of one. +- Transaction fees are debited from accounts _before_ the transaction is + processed. +- All smart contract analysis errors are now treated as runtime errors, meaning + that smart contract transactions which don't pass analysis will still be mined +(so miners get paid for partially validating them). +- The default Clarity version is now 2. Users can opt for version 1 by using + the new smart contract transaction wire format and explicitly setting version + +### Fixed + +- The authorization of a `contract-caller` in `.pox-2` for stacking will now + expire at the user-specified height, if given. +- The Clarity function `principal-of?` now works on mainnet. 
+- One or more late block-commits no longer result in the miner losing its + sortition weight. +- Documentation will indicate explicitly which Clarity version introduced each + keyword or function. ## [2.05.0.6.0] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..9c121b9dbf --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,587 @@ +# Contributing to the Stacks Blockchain + +The Stacks blockchain is open-source software written in Rust. Contributions +should adhere to the following best practices. + +Blockchain software development requires a much higher degree of rigor +than most other kinds of software. This is because with blockchains, +**there is no roll-back** from a bad deployment. There is essentially +zero room for consensus bugs. If you ship a consensus bug, that bug +could not only have catastrophic consequences for users (i.e. they +lose all their money), but also be intractable to fix, mitigate, or +remove. This is because unlike nearly every other kind of networked +software, **the state of the blockchain is what the users' computers +say it is.** If you want to make changes, you _must_ get _user_ +buy-in, and this is necessarily time-consuming and not at all +guaranteed to succeed. + +You can find information on joining online community forums (Discord, mailing list etc.) in the [README](README.md). + +# Code of Conduct + +This project and everyone participating in it is governed by this [Code of Conduct](CODE_OF_CONDUCT.md). + +# How Can I Contribute? + +## Development Workflow + +- For typical development, branch off of the `develop` branch. +- For consensus breaking changes, branch off of the `next` branch. +- For hotfixes, branch off of `master`. + +If you have commit access, use a branch in this repository. If you do +not, then you must use a github fork of the repository. 
+ +### Branch naming + +Branch names should use a prefix that conveys the overall goal of the branch: + +- `feat/some-fancy-new-thing` for new features +- `fix/some-broken-thing` for hot fixes and bug fixes +- `docs/something-needs-a-comment` for documentation +- `ci/build-changes` for continuous-integration changes +- `test/more-coverage` for branches that only add more tests +- `refactor/formatting-fix` for refactors + +### Merging PRs from Forks + +PRs from forks or opened by contributors without commit access require +some special handling for merging. Any such PR, after being reviewed, +must get assigned to a contributor with commit access. This merge-owner +is responsible for: + +1. Creating a new branch in this repository based on the base branch + for the PR. +2. Retargeting the PR toward the new branch. +3. Merging the PR into the new branch. +4. Opening a new PR from `new_branch -> original_base` +5. Tagging reviewers for re-approval. +6. Merging the new PR. + +For an example of this process, see PRs +[#3598](https://github.com/stacks-network/stacks-blockchain/pull/3598) and +[#3626](https://github.com/stacks-network/stacks-blockchain/pull/3626). + + +### Documentation Updates + +- Any major changes should be added to the [CHANGELOG](CHANGELOG.md). +- Mention any required documentation changes in the description of your pull request. +- If adding an RPC endpoint, add an entry for the new endpoint to the + OpenAPI spec `./docs/rpc/openapi.yaml`. +- If your code adds or modifies any major features (struct, trait, + test, module, function, etc.), each should be documented according + to our [coding guidelines](#Coding-Guidelines). + +## Git Commit Messages +Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). 
+The general format is as follows: +``` +[optional scope]: + +[optional body] +[optional footer(s)] +``` +Common types include build, ci, docs, fix, feat, test, refactor, etc. + +When a commit is addressing or related to a particular Github issue, it +should reference the issue in the commit message. For example: + +``` +fix: incorporate unlocks in mempool admitter, #3623 +``` + +# Creating and Reviewing PRs + +This section describes some best practices on how to create and review PRs in this context. The target audience is people who have commit access to this repository (reviewers), and people who open PRs (submitters). This is a living document -- developers can and should document their own additional guidelines here. + +## Overview + +Blockchain software development requires a much higher degree of rigor than most other kinds of software. This is because with blockchains, **there is no roll-back** from a bad deployment. + +Therefore, making changes to the codebase is necessarily a review-intensive process. No one wants bugs, but **no one can afford consensus bugs**. This page describes how to make and review _non-consensus_ changes. The process for consensus changes includes not only the entirety of this document, but also the [SIP process](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md). + +A good PR review sets both the submitter and reviewers up for success. It minimizes the time required by both parties to get the code into an acceptable state, without sacrificing quality or safety. Unlike most other software development practices, _safety_ is the primary concern. A PR can and will be delayed or closed if there is any concern that it will lead to unintended consensus-breaking changes. + +This document is formatted like a checklist. Each paragraph is one goal or action item that the reviewer and/or submitter must complete. The **key take-away** from each paragraph is bolded. 
+ +## Reviewer Expectations + +The overall task of a reviewer is to create an **acceptance plan** for the submitter. This is simply the list of things that the submitter _must_ do in order for the PR to be merged. The acceptance plan should be coherent, cohesive, succinct, and complete enough that the reviewer will understand exactly what they need to do to make the PR worthy of merging, without further reviews. The _lack of ambiguity_ is the most important trait of an acceptance plan. + +Reviewers should **complete the review in one round**. The reviewer should provide enough detail to the submitter that the submitter can make all of the requested changes without further supervision. Whenever possible, the reviewer should provide all of these details publicly as comments, so that _other_ reviewers can vet them as well. If a reviewer _cannot_ complete the review in one round due to its size and complexity, then the reviewer may request that the PR be simplified or broken into multiple PRs. + +Reviewers should make use of Github's "pending comments" feature. This ensures that the review is "atomic": when the reviewer submits the review, all the comments are published at once. + +Reviewers should aim to **perform a review in one sitting** whenever possible. This enables a reviewer to time-box their review, and ensures that by the time they finish studying the patch, they have a complete understanding of what the PR does in their head. This, in turn, sets them up for success when writing up the acceptance plan. It also enables reviewers to mark time for it on their calendars, which helps everyone else develop reasonable expectations as to when things will be done. + +Code reviews should be timely. Reviewers should start no more than +**2 business days** after reviewers are assigned. This applies to each +reviewer: i.e., we expect all reviewers to respond within two days. 
+The `develop` and `next` branches in particular often change quickly, +so letting a PR languish only creates more merge work for the +submitter. If a review cannot be started within this timeframe, then +the reviewers should **tell the submitter when they can begin**. This +gives the reviewer the opportunity to keep working on the PR (if +needed) or even withdraw and resubmit it. + +Reviewers must, above all else, **ensure that submitters follow the PR checklist** below. + +**As a reviewer, if you do not understand the PR's code or the potential consequences of the code, it is the submitter's responsibility to simplify the code, provide better documentation, or withdraw the PR.** + +## Submitter Expectations + +Everyone is busy all the time with a host of different tasks. Consequently, a PR's size and scope should be constrained so that **a review can be written for it no more than 2 hours.** This time block starts when the reviewer opens the patch, and ends when the reviewer hits the "submit review" button. If it takes more than 2 hours, then the PR should be broken into multiple PRs unless the reviewers agree to spend more time on it. A PR can be rejected if the reviewers believe they will need longer than this. + +The size and scale of a PR depend on the reviewers' abilities to process the change. Different reviewers and submitters have different levels of familiarity with the codebase. Moreover, everyone has a different schedule -- sometimes, some people are more busy than others. + +A successful PR submitter **takes the reviewers' familiarity and availability into account** when crafting the PR, even going so far as to ask in advance if a particular person could be available for review. + +Providing detailed answers to reviewer questions is often necessary as a submitter. In order to make this information accessible even after a PR has merged, **submitters should strive to incorporate any clarifications into code comments**. + +**Selecting Reviewers**. 
PR submitters may tag reviewers that they +think are relevant to the code changes in the PR (or using the +reviewer suggestions provided by Github). If a PR is submitted without +assigned reviewers, then reviewers will be assigned at least by the next +Weekly Blockchain Engineering Meeting (information can be found in Discord). + +## Submission Checklist + +A PR submission's text should **answer the following questions** for reviewers: + +* What problem is being solved by this PR? +* What does the solution do to address them? +* Why is this the best solution? What alternatives were considered, and why are they worse? +* What do reviewers need to be familiar with in order to provide useful feedback? +* What issue(s) are addressed by this PR? +* What are some hints to understanding some of the more intricate or clever parts of the PR? +* Does this PR change any database schemas? Does a node need to re-sync from genesis when this PR is applied? + +In addition, the PR submission should **answer the prompts of the Github template** we use for PRs. + +The code itself should adhere to our coding guidelines and conventions, which both submitters and reviewers should check. + +# Coding Conventions + +### Simplicity of implementation + +The most important consideration when accepting or rejecting a contribution is +the simplicity (i.e. ease of understanding) of its implementation. +Contributions that are "clever" or introduce functionality beyond the scope of +the immediate problem they are meant to solve will be rejected. + +#### Type simplicity + +Simplicity of implementation includes simplicity of types. Type parameters +and associated types should only be used if there are at +least two possible implementations of those types. + +Lifetime parameters should only be introduced if the compiler cannot deduce them +on its own. + +### Builds with a stable Rust compiler + +We use a recent, stable Rust compiler. 
Contributions should _not_ +require nightly Rust features to build and run. + +### Minimal dependencies + +Adding new package dependencies is very much discouraged. Exceptions will be +granted on a case-by-case basis, and only if deemed absolutely necessary. + +### Minimal global macros + +Adding new global macros is discouraged. Exceptions will only be given if +absolutely necessary. + +### No compiler warnings + +Contributions should not trigger compiler warnings if possible, and should not +mask compiler warnings with macros. + +### Minimal `unsafe` code + +Contributions should not contain `unsafe` blocks if at all possible. + +# Coding Guidelines + +## Documentation + +* Each file must have a **copyright statement**. +* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). +* Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. + +Within the source files, the following **code documentation** standards are expected: + +* Each public function, struct, enum, and trait should have a Rustdoc comment block describing the API contract it offers. This goes for private structs and traits as well. +* Each _non-trivial_ private function should likewise have a Rustdoc comment block. Trivial ones that are self-explanatory, like getters and setters, do not need documentation. If you are unsure if your function needs a docstring, err on the side of documenting it. +* Each struct and enum member must have a Rustdoc comment string indicating what it does, and how it is used. This can be as little as a one-liner, as long as the relevant information is communicated. + +## Factoring + +* **Each non-`mod.rs` file implements at most one subsystem**. 
It may include multiple struct implementations and trait implementations. The filename should succinctly identify the subsystem, and the file-level documentation must succinctly describe it and how it relates to other subsystems it interacts with. + +* Directories represent collections of related but distinct subsystems. + +* To the greatest extent possible, **business logic and I/O should be + separated**. A common pattern used in the codebase is to place the + business logic into an "inner" function that does not do I/O, and + handle I/O reads and writes in an "outer" function. The "outer" + function only does the needful I/O and passes the data into the + "inner" function. The "inner" function is often private, whereas + the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). + +## Refactoring + +* **Any PR that does a large-scale refactoring must be in its own PR**. This includes PRs that touch multiple subsystems. Refactoring often adds line noise that obscures the new functional changes that the PR proposes. Small-scale refactorings are permitted to ship with functional changes. + +* Refactoring PRs can generally be bigger, because they are easier to review. However, **large refactorings that could impact the functional behavior of the system should be discussed first** before carried out. This is because it is imperative that they do not stay open for very long (to keep the submitter's maintenance burden low), but nevertheless reviewing them must still take at most 2 hours. Discussing them first front-loads part of the review process. + +## Databases + +* If at all possible, **the database schema should be preserved**. Exceptions can be made on a case-by-case basis. 
The reason for this is that it's a big ask for people to re-sync nodes from genesis when they upgrade to a new point release. + +* Any changes to a database schema must also ship with a **new schema version and new schema migration logic**, as well as _test coverage_ for it. + +* The submitter must verify that **any new database columns are indexed**, as relevant to the queries performed on them. Table scans are not permitted if they can be avoided (and they almost always can be). You can find table scans manually by setting the environment variable `BLOCKSTACK_DB_TRACE` when running your tests (this will cause every query executed to be preceded by the output of `EXPLAIN QUERY PLAN` on it). + +* Database changes **cannot be consensus-critical** unless part of a hard fork (see below). + +* If the database schema changes and no migration can be feasibly done, then the submitter **must spin up a node from genesis to verify that it works** _before_ submitting the PR. This genesis spin-up will be tested again before the next node release is made. + +## Data Input + +* **Data from the network, from Bitcoin, and from the config file is untrusted.** Code that ingests such data _cannot assume anything_ about its structure, and _must_ handle any possible byte sequence that can be submitted to the Stacks node. + +* **Data previously written to disk by the node is trusted.** If data loaded from the database that was previously stored by the node is invalid or corrupt, it is appropriate to panic. + +* **All input processing is space-bound.** Every piece of code that ingests data must impose a maximum size on its byte representation. Any inputs that exceed this size _must be discarded with as little processing as possible_. + +* **All input deserialization is resource-bound.** Every piece of code + that ingests data must impose a maximum amount of RAM and CPU + required to decode it into a structured representation. 
If the data + does not decode with the allotted resources, then no further + processing may be done and the data is discarded. For an example, see + how the parsing functions in the http module use `BoundReader` and + `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). + +* **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is meant by the space-bound requirement. + +* **Untrusted data ingestion must not panic.** Every piece of code that ingests untrusted data must gracefully handle errors. Panicking failures are forbidden for such data. Panics are only allowed if the ingested data was previously written by the node (and thus trusted). + +## Non-consensus Changes to Blocks, Microblocks, Transactions, and Clarity + +Any changes to code that alters how a block, microblock, or transaction is processed by the node should be **treated as a breaking change until proven otherwise**. This includes changes to the Clarity VM. The reviewer _must_ flag any such changes in the PR, and the submitter _must_ convince _all_ reviewers that they will _not_ break consensus. + +Changes that touch any of these four code paths must be treated with the utmost care. If _any_ core developer suspects that a given PR would break consensus, then they _must_ act to prevent the PR from merging. + +## Changes to the Peer Network + +Any changes to the peer networking code **must be run on both mainnet and testnet before the PR can be merged.** The submitter should set up a testable node or set of nodes that reviewers can interact with. 
+ +Changes to the peer network should be deployed incrementally and tested by multiple parties when possible to verify that they function properly in a production setting. + +## Performance Improvements + +Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. + +For an example, see [PR #3075](https://github.com/stacks-network/stacks-blockchain/pull/3075). + +## Error Handling + +* **Results must use `Error` types**. Fallible functions in the +codebase must use `Error` types in their `Result`s. If a new module's +errors are sufficiently different from existing `Error` types in the +codebaes, the new module must define a new `Error` type. Errors that +are caused by other `Error` types should be wrapped in a variant of +the new `Error` type. You should provide conversions via a `From` +trait implementation. + +* Functions that act on externally-submitted data **must never panic**. This includes code that acts on incoming network messages, blockchain data, and burnchain (Bitcoin) data. + +* **Runtime panics should be used sparingly**. Generally speaking, a runtime panic is only appropriate if there is no reasonable way to recover from the error condition. For example, this includes (but is not limited to) disk I/O errors, database corruption, and unreachable code. + +* If a runtime panic is desired, it **must have an appropriate error message**. + +## Logging + +* Log messages should be informative and context-free as possible. They are used mainly to help us identify and diagnose problems. They are _not_ used to help you verify that your code works; that's the job of a unit test. + +* **DO NOT USE println!() OR eprintln!()**. Instead, use the logging macros (`test_debug!()`, `trace!()`, `debug!()`, `info!()`, `warn!()`, `error!()`). + +* Use **structured logging** to include dynamic data in your log entry. 
For example, `info!("Append block"; "block_id" => %block_id)` as opposed to `info!("Append block with block_id = {}", block_id)`. + +* Use `trace!()` and `test_debug!()` liberally. It only runs in tests. + +* Use `debug!()` for information that is relevant for diagnosing problems at runtime. This is off by default, but can be turned on with the `BLOCKSTACK_DEBUG` environment variable. + +* Use `info!()` sparingly. + +* Use `warn!()` or `error!()` only when there really is a problem. + +## Consensus-Critical Code + +A **consensus-critical change** is a change that affects how the Stacks blockchain processes blocks, microblocks, or transactions, such that a node with the patch _could_ produce a different state root hash than a node without the patch. If this is even _possible_, then the PR is automatically treated as a consensus-critical change and must ship as part of a hard fork. It must also be described in a SIP. + +* **All changes to consensus-critical code must be opened against `next`**. It is _never acceptable_ to open them against `develop` or `master`. + +* **All consensus-critical changes must be gated on the Stacks epoch**. They may only take effect once the system enters a specific epoch (and this must be documented). + +A non-exhaustive list of examples of consensus-critical changes include: + +* Adding or changing block, microblock, or transaction wire formats +* Changing the criteria under which a burnchain operation will be accepted by the node +* Changing the data that gets stored to a MARF key/value pair in the Clarity or Stacks chainstate MARFs +* Changing the order in which data gets stored in the above +* Adding, changing, or removing Clarity functions +* Changing the cost of a Clarity function +* Adding new kinds of transactions, or enabling certain transaction data field values that were previously forbidden. + +## Testing + +* **Unit tests should focus on the business logic with mocked data**. 
To the greatest extent possible, each error path should be tested _in addition to_ the success path. A submitter should expect to spend most of their test-writing time focusing on error paths; getting the success path to work is often much easier than the error paths. + +* **Unit tests should verify that the I/O code paths work**, but do so in a way that does not "clobber" other tests or prevent other tests from running in parallel (if it can be avoided). This means that unit tests should use their own directories for storing transient state (in `/tmp`), and should bind on ports that are not used anywhere else. + +* If randomness is needed, **tests should use a seeded random number generator if possible**. This ensures that they will reliably pass in CI. + +* When testing a consensus-critical code path, the test coverage should verify that the new behavior is only possible within the epoch(s) in which the behavior is slated to activate. Above all else, **backwards-compatibility is a hard requirement.** + +* **Integration tests are necessary when the PR has a consumer-visible effect**. For example, changes to the RESTful API, event stream, and mining behavior all require integration tests. + +* Every consensus-critical change needs an integration test to verify that the feature activates only when the hard fork activates. + +PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel +(which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. + +A test should be marked `#[ignore]` if: + + 1. It does not _always_ pass `cargo test` in a vanilla environment + (i.e., it does not need to run with `--test-threads 1`). + + 2. Or, it runs for over a minute via a normal `cargo test` execution + (the `cargo test` command will warn if this is not the case). + + + +## Formatting + +This repository uses the default rustfmt formatting style. 
PRs will be checked against `rustfmt` and will _fail_ if not +properly formatted. + +You can check the formatting locally via: + +```bash +cargo fmt --all -- --check --config group_imports=StdExternalCrate +``` + +You can automatically reformat your commit via: + +```bash +cargo fmt --all -- --config group_imports=StdExternalCrate +``` + +## Comments +Comments are very important for the readability and correctness of the codebase. The purpose of comments is: + +* Allow readers to understand the roles of components and functions without having to check how they are used. +* Allow readers to check the correctness of the code against the comments. +* Allow readers to follow tests. + +In the limit, if there are no comments, the problems that arise are: + +* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. +* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. +* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. + +### Comment Formatting + +Comments are to be formatted in typical `rust` style, specifically: + +- Use markdown to format comments. + +- Use the triple forward slash "///" for modules, structs, enums, traits and functions. Use double forward slash "//" for comments on individual lines of code. + +- Start with a high-level description of the function, adding more sentences with details if necessary. 
+ +- When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: + + ```rust + # Errors + * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. + ``` + +### Content of Comments + + +#### Component Comments + +Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall +purpose of that component is. This is usually a concept, and not a formal contract. Include anything that is not obvious about this component. + +**Example:** + +```rust +/// The `ReadOnlyChecker` analyzes a contract to determine whether +/// there are any violations of read-only declarations. By a "violation" +/// we mean a function that is marked as "read only" but which tries +/// to modify chainstate. +pub struct ReadOnlyChecker<'a, 'b> { +``` + +This comment is considered positive because it explains the concept behind the class at a glance, so that the reader has some idea about what the methods will achieve, without reading each method declaration and comment. It also defines some terms that can be used in the comments on the method names. + +#### Function Comments + +The comments on a function should explain what the function does, without having to read it. Wherever practical, it should specify the contract of a function, such that a bug in the logic could be discovered by a discrepancy between contract and implementation, or such that a test could be written with only access to the function comment. + +Without being unnecessarily verbose, explain how the output is calculated +from the inputs. Explain the side effects. Explain any restrictions on the inputs. Explain failure +conditions, including when the function will panic, return an error +or return an empty value. + +**Example:** + +```rust +/// A contract that does not violate its read-only declarations is called +/// *read-only correct*. 
+impl<'a, 'b> ReadOnlyChecker<'a, 'b> { + /// Checks each top-level expression in `contract_analysis.expressions` + /// for read-only correctness. + /// + /// Returns successfully iff this function is read-only correct. + /// + /// # Errors + /// + /// - Returns CheckErrors::WriteAttemptedInReadOnly if there is a read-only + /// violation, i.e. if some function marked read-only attempts to modify + /// the chainstate. + pub fn run(&mut self, contract_analysis: &ContractAnalysis) -> CheckResult<()> +``` + +This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. + +#### Comments on Implementations of Virtual Methods + +Note that, if a function implements a virtual function on an interface, the comments should not +repeat what was specified on the interface declaration. The comment should only add information specific to that implementation. + +#### Data Member Comments + +Each data member in a struct should have a comment describing what that member +is, and what it is used for. Such comments are usually brief but should +clear up any ambiguity that might result from having only the variable +name and type. + +**Example:** + +```rust +pub struct ReadOnlyChecker<'a, 'b> { + /// Mapping from function name to a boolean indicating whether + /// the function with that name is read-only. + /// This map contains all functions in the contract analyzed. + defined_functions: HashMap, +``` + +This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone. + +#### Test Comments + +Each test should have enough comments to help an unfamiliar reader understand: + +1. what is conceptually being tested +1. 
why a given answer is expected + +Sometimes this can be obvious without much comments, perhaps from the context, +or because the test is very simple. Often though, comments are necessary. + +**Example:** + +```rust +#[test] +#[ignore] +/// The purpose of this test is to check if the mempool admission checks +/// for the post tx endpoint are working as expected wrt the optional +/// `mempool_admission_check` query parameter. +/// +/// In this test, we are manually creating a microblock as well as +/// reloading the unconfirmed state of the chainstate, instead of relying +/// on `next_block_and_wait` to generate microblocks. We do this because +/// the unconfirmed state is not automatically being initialized +/// on the node, so attempting to validate any transactions against the +/// expected unconfirmed state fails. +fn transaction_validation_integration_test() { +``` + +This comment is considered positive because it explains the purpose of the test (checking the case of an optional parameter), it also guides the reader to understand the low-level details about why a microblock is created manually. + +### How Much to Comment + +Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. + +Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. 
+ +### Don't Restate Names in Comments + +The contracts of functions should be implemented precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: + +* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** +* **the author should only state information that is new** + +So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! + +**Bad Example** + +```rust +/// Appends a transaction to a block. +fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> +``` + +This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. + +**Good Example** + +```rust +/// # Errors +/// +/// - BlockTooBigError: Is returned if adding `transaction` to `block` results +/// in a block size bigger than MAX_BLOCK_SIZE. +fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()> +``` + +This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. + +### Do's and Dont's of Comments + +*Don't* over-comment by documenting things that are clear from the context. E.g.: + +- Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`. +- Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment. 
+- Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking. + +*Do* document things that are not clear, e.g.: + +- For a function called `process_block`, explain what it means to "process" a block. +- For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both. +- For a function called `run`, explain the steps involved in "running". +- For a function that takes arguments `peer1` and `peer2`, explain the difference between the two. +- For a function that takes an argument `height`, either explain in the comment what this is the *height of*. Alternatively, expand the variable name to remove the ambiguity. +- For a test, document what it is meant to test, and why the expected answers are, in fact, expected. + +### Changing Code Instead of Comments + +Keep in mind that better variable names can reduce the need for comments, e.g.: + +* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height +* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks +* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment + +# Licensing and contributor license agreement + +`stacks-blockchain` is released under the terms of the GPL version 3. Contributions +that are not licensed under compatible terms will be rejected. Moreover, +contributions will not be accepted unless _all_ authors accept the project's +contributor license agreement. + +## Use of AI-code Generation +The Stacks Foundation has a very strict policy of not accepting AI-generated code PRs due to uncertainly about licensing issues. 
diff --git a/Cargo.lock b/Cargo.lock index 51df734608..841302aee0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ "gimli", ] @@ -29,7 +29,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -85,18 +85,27 @@ checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anyhow" -version = "1.0.53" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "assert-json-diff" @@ -116,14 +125,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "async-channel" -version = "1.6.1" +version 
= "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -142,31 +151,30 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell", "slab", ] [[package]] name = "async-global-executor" -version = "2.0.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", "async-io", - "async-mutex", + "async-lock", "blocking", "futures-lite", - "num_cpus", "once_cell", ] @@ -188,46 +196,38 @@ dependencies = [ [[package]] name = "async-io" -version = "1.6.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ + "async-lock", + "autocfg", "concurrent-queue", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", "socket2", "waker-fn", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" -dependencies = [ - "event-listener", -] - -[[package]] -name 
= "async-mutex" -version = "1.4.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", ] [[package]] name = "async-std" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-attributes", "async-channel", @@ -243,7 +243,6 @@ dependencies = [ "kv-log-macro", "log", "memchr", - "num_cpus", "once_cell", "pin-project-lite", "pin-utils", @@ -253,15 +252,15 @@ dependencies = [ [[package]] name = "async-task" -version = "4.1.0" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d306121baf53310a3fd342d88dc0824f6bbeace68347593658525565abee8" +checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" [[package]] name = "atty" @@ -269,22 +268,22 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi 0.3.9", ] [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = 
"d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.64" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", @@ -297,9 +296,9 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" @@ -309,9 +308,15 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "bitflags" @@ -337,16 +342,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] 
[[package]] @@ -360,16 +365,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046e47d4b2d391b1f6f8b407b1deb8dee56c1852ccd868becf2710f601b5f427" +checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", ] [[package]] @@ -393,15 +398,15 @@ dependencies = [ "rand_chacha 0.2.2", "regex", "ripemd", - "rstest", - "rstest_reuse", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", - "secp256k1 0.24.2", + "secp256k1", "serde", "serde_derive", "serde_json", "serde_stacker", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "siphasher", "slog", @@ -415,18 +420,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "buf_redux" version = "0.8.4" @@ -439,9 +432,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byte-tools" @@ -457,30 +450,21 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cache-padded" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cast" -version = "0.2.7" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version 0.4.0", -] +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -496,14 +480,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.45", + "wasm-bindgen", "winapi 0.3.9", ] @@ -513,7 +499,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] @@ -537,8 +523,8 @@ dependencies = [ "rand 0.7.3", "rand_chacha 0.2.2", "regex", - "rstest", - "rstest_reuse", + "rstest 0.17.0", + "rstest_reuse 0.5.0", "rusqlite", "serde", "serde_derive", @@ -552,20 +538,30 @@ dependencies = [ [[package]] name = "clear_on_drop" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9cc5db465b294c3fa986d5bbb0f3017cd850bff6dd6c52f9ccff8b4d21b7b08" +checksum = "38508a63f4979f0048febc9966fadbd48e5dab31fd0ec6a3f151bbf4a74f7423" dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] @@ -581,21 +577,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ "aes-gcm", - "base64 0.13.0", + "base64 0.13.1", "hkdf", "hmac", "percent-encoding", - "rand 0.8.4", + "rand 0.8.5", "sha2 0.9.9", "time 0.2.27", "version_check", ] +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -608,18 +610,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" 
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "criterion" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" +checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", @@ -643,9 +645,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" +checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", @@ -653,9 +655,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -663,9 +665,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -674,34 +676,33 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.7" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ + "autocfg", "cfg-if 1.0.0", "crossbeam-utils", - 
"lazy_static", - "memoffset", + "memoffset 0.8.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.7" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if 1.0.0", - "lazy_static", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "typenum", ] @@ -711,19 +712,18 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] [[package]] name = "csv" -version = "1.1.6" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -739,12 +739,12 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.21" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -770,6 +770,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn 1.0.109", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "digest" version = "0.8.1" @@ -785,16 +829,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", ] [[package]] name = "digest" -version = "0.10.3" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.4", "crypto-common", ] @@ -840,24 +884,45 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = 
"encoding_rs" -version = "0.8.30" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "event-listener" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "extend" @@ -868,7 +933,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -891,9 +956,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -906,11 +971,10 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -930,11 +994,26 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "futures" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -942,15 +1021,26 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" + +[[package]] +name = "futures-executor" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" 
[[package]] name = "futures-lite" @@ -967,26 +1057,45 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-macro" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.12", +] + [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" + +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ + "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -1006,9 +1115,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -1027,13 +1136,13 @@ dependencies 
= [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -1048,15 +1157,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "gloo-timers" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d12a7f4e95cfe710f1d624fb1210b7d961a5fb05c4fd942f4feab06e61f590e" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -1066,9 +1175,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.11" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -1100,9 +1209,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashlink" @@ -1115,18 +1224,18 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.6" +version = "0.3.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c84c647447a07ca16f5fbd05b633e535cc41a08d2d74ab1e08648df53be9cb89" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha-1", + "sha1 0.10.5", ] [[package]] @@ -1147,6 +1256,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + [[package]] name = "hkdf" version = "0.10.0" @@ -1169,20 +1287,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -1198,7 +1316,7 @@ dependencies = [ "anyhow", "async-channel", "async-std", - "base64 0.13.0", + "base64 0.13.1", "cookie", "futures-lite", "infer", @@ -1213,9 +1331,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1225,9 +1343,9 @@ checksum = 
"c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" dependencies = [ "bytes", "futures-channel", @@ -1238,7 +1356,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", "socket2", "tokio", @@ -1249,9 +1367,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -1260,25 +1378,48 @@ dependencies = [ "tokio-rustls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi 0.3.9", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.8.0" 
+version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -1305,6 +1446,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "iovec" version = "0.1.4" @@ -1316,45 +1467,42 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.1" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +dependencies = [ + "cpufeatures", +] [[package]] name = "kernel32-sys" @@ -1383,15 +1531,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.117" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libflate" -version = "1.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d57e534717ac3e0b8dc459fe338bdfb4e29d7eea8fd0926ba649ddd3f4765f" +checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" dependencies = [ "adler32", "crc32fast", @@ -1400,9 +1548,9 @@ dependencies = [ [[package]] name = "libflate_lz77" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +checksum = "a52d3a8bfc85f250440e4424db7d857e241a3aebbbe301f3eb606ab15c39acbf" dependencies = [ "rle-decode-fast", ] @@ -1419,26 +1567,35 @@ dependencies = [ ] [[package]] -name = "log" -version = "0.4.14" +name = "link-cplusplus" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" 
dependencies = [ - "cfg-if 1.0.0", - "value-bag", + "cc", ] [[package]] -name = "matches" -version = "0.1.9" +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "log" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if 1.0.0", + "value-bag", +] [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1449,6 +1606,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -1457,9 +1623,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -1467,12 +1633,11 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = 
"b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", - "autocfg", ] [[package]] @@ -1488,7 +1653,7 @@ dependencies = [ "kernel32-sys", "libc", "log", - "miow 0.2.2", + "miow", "net2", "slab", "winapi 0.2.8", @@ -1496,15 +1661,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "miow 0.3.7", - "ntapi", - "winapi 0.3.9", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] @@ -1519,15 +1683,6 @@ dependencies = [ "ws2_32-sys", ] -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi 0.3.9", -] - [[package]] name = "multipart" version = "0.18.0" @@ -1540,7 +1695,7 @@ dependencies = [ "mime", "mime_guess", "quick-error", - "rand 0.8.4", + "rand 0.8.5", "safemem", "tempfile", "twoway", @@ -1548,9 +1703,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" +version = "0.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1559,31 +1714,22 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", "cfg-if 1.0.0", "libc", - "memoffset", -] - 
-[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", + "memoffset 0.6.5", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -1591,37 +1737,46 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +dependencies = [ + "hermit-abi 0.2.6", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ - "hermit-abi", "libc", ] [[package]] name = "object" -version = "0.27.1" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.9.0" +version = "1.17.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "oorandom" @@ -1649,16 +1804,17 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.1.3" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ + "thiserror", "ucd-trie", ] @@ -1670,29 +1826,29 @@ checksum = "28b9b4df73455c861d7cbf8be42f01d3b373ed7f02e378d55fa84eafc6f638b1" [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1702,15 +1858,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "plotters" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -1721,30 +1877,33 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.2.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" dependencies = [ + "autocfg", + "bitflags", "cfg-if 1.0.0", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - 
"winapi 0.3.9", + "pin-project-lite", + "windows-sys 0.45.0", ] [[package]] @@ -1760,9 +1919,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -1773,7 +1932,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -1790,17 +1949,17 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.19" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1819,15 +1978,15 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.27.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psm" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] @@ -1840,9 +1999,9 @@ checksum = 
"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.15" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1857,19 +2016,18 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", + "rand_core 0.6.4", ] [[package]] @@ -1889,7 +2047,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1903,11 +2061,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.8", ] [[package]] @@ -1919,102 +2077,72 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "rayon" -version = "1.5.1" +version = "1.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.8", "redox_syscall", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - [[package]] name = "regex-syntax" 
-version = "0.6.25" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "relay-server" -version = "0.0.1" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "reqwest" -version = "0.11.9" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64 0.13.0", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -2026,18 +2154,19 @@ dependencies = [ "hyper-rustls", "ipnet", "js-sys", - "lazy_static", "log", "mime", + "once_cell", "percent-encoding", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.2", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -2063,11 +2192,11 @@ dependencies = [ [[package]] name = "ripemd" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1facec54cb5e0dc08553501fa740091086d0259ad0067e0d4103448e4cb22ed3" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", ] [[package]] @@ -2086,7 +2215,33 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "rstest" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"de1bb486a691878cd320c2f0d319ba91eeaa2e894066d8b5f8f117c000e9d962" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290ca1a1c8ca7edb7c3283bd44dc35dd54fdec6253a3912e201ba1072018fca8" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "rustc_version 0.4.0", + "syn 1.0.109", + "unicode-ident", ] [[package]] @@ -2097,7 +2252,19 @@ checksum = "32c6cfaae58c048728261723a72b80a0aa9f3768e9a7da3b302a24d262525219" dependencies = [ "quote", "rustc_version 0.3.3", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "rstest_reuse" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073" +dependencies = [ + "quote", + "rand 0.8.5", + "rustc_version 0.4.0", + "syn 1.0.109", ] [[package]] @@ -2147,14 +2314,28 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.5", + "semver 1.0.17", +] + +[[package]] +name = "rustix" +version = "0.36.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", ] [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -2168,20 +2349,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safemem" @@ -2200,9 +2390,9 @@ dependencies = [ [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -2210,6 +2400,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" + [[package]] name = "sct" version = "0.7.0" @@ -2222,33 +2418,14 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab7883017d5b21f011ef8040ea9c6c7ac90834c0df26a69e4c0b06276151f125" -dependencies = [ - "secp256k1-sys 0.4.2", - "serde", -] - -[[package]] -name = "secp256k1" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9512ffd81e3a3503ed401f79c33168b9148c75038956039166cd750eaa037c3" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ - "secp256k1-sys 0.6.1", + "secp256k1-sys", "serde", ] -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - [[package]] name = "secp256k1-sys" version = "0.6.1" @@ -2278,9 +2455,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.5" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "semver-parser" @@ -2299,9 +2476,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.136" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4" dependencies = [ "serde_derive", ] @@ -2318,22 +2495,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.156" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "serde_json" -version = "1.0.78" +version = "1.0.94" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2351,9 +2528,9 @@ dependencies = [ [[package]] name = "serde_stacker" -version = "0.1.4" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c92391a63e3b83f77334d8beaaf11bac4c900f3769483e543bf76a81bf8ee2" +checksum = "2f5557f4c1103cecd0e639a17ab22d670b89912d8a506589ee627bf738a15a5d" dependencies = [ "serde", "stacker", @@ -2366,22 +2543,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.6", ] [[package]] @@ -2393,6 +2568,17 @@ dependencies = [ "sha1_smol", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sha1_smol" version = "1.0.0" @@ -2426,13 +2612,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = 
"82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.3", + "digest 0.10.6", "sha2-asm 0.6.2", ] @@ -2456,11 +2642,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.6", "keccak", ] @@ -2475,15 +2661,18 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a86232ab60fa71287d7f2ddae4a7073f6b7aac33631c3015abb556f08c6d0a3e" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "slab" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "slog" @@ -2493,40 +2682,40 @@ checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" [[package]] name = "slog-json" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f7f7a952ce80fca9da17bf0a53895d11f8aa1ba063668ca53fc72e7869329e9" +checksum = "3e1e53f61af1e3c8b852eef0a9dee29008f55d6dd63794f3f12cef786cf0f219" dependencies = [ - "chrono", "serde", "serde_json", "slog", + "time 0.3.20", ] [[package]] name = "slog-term" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95c1e7e5aab61ced6006149ea772770b84a0d16ce0f7885def313e4829946d76" +checksum = 
"87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" dependencies = [ "atty", - "chrono", "slog", "term", "thread_local", + "time 0.3.20", ] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", @@ -2540,9 +2729,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stacker" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90939d5171a4420b3ff5fbc8954d641e7377335454c259dcb80786f3f21dc9b4" +checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" dependencies = [ "cc", "cfg-if 1.0.0", @@ -2564,15 +2753,15 @@ dependencies = [ "percent-encoding", "rand 0.7.3", "ripemd", - "rstest", - "rstest_reuse", + "rstest 0.11.0", + "rstest_reuse 0.1.3", "rusqlite", - "secp256k1 0.21.2", + "secp256k1", "serde", "serde_derive", "serde_json", "serde_stacker", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "slog", "slog-json", @@ -2650,7 +2839,7 @@ dependencies = [ "quote", "serde", "serde_derive", - "syn", + "syn 1.0.109", ] [[package]] @@ -2665,8 +2854,8 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "sha1", - "syn", + "sha1 0.6.1", + "syn 1.0.109", ] [[package]] @@ -2680,7 +2869,7 @@ name = "stx-genesis" version = "0.1.0" dependencies = [ "libflate", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] @@ -2691,27 +2880,37 @@ checksum = 
"6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if 1.0.0", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", + "rustix", + "windows-sys 0.42.0", ] [[package]] @@ -2725,6 +2924,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "termcolor" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +dependencies = [ + "winapi-util", +] + [[package]] name = "textwrap" version = "0.11.0" @@ -2736,38 +2944,39 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -2784,11 +2993,31 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", + "time-macros 0.1.1", "version_check", "winapi 0.3.9", ] +[[package]] +name = "time" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +dependencies = [ + "itoa", + "libc", + "num_threads", + "serde", + "time-core", + "time-macros 0.2.8", +] + +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" version = "0.1.1" @@ -2799,6 +3028,15 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +dependencies = [ + "time-core", +] + [[package]] name = 
"time-macros-impl" version = "0.1.2" @@ -2809,7 +3047,7 @@ dependencies = [ "proc-macro2", "quote", "standback", - "syn", + "syn 1.0.109", ] [[package]] @@ -2824,32 +3062,34 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.16.1" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ + "autocfg", "bytes", "libc", "memchr", - "mio 0.7.14", + "mio 0.8.6", "num_cpus", "pin-project-lite", - "winapi 0.3.9", + "socket2", + "windows-sys 0.45.0", ] [[package]] @@ -2865,9 +3105,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -2876,51 +3116,50 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.15.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +checksum = 
"f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "pin-project", "tokio", "tungstenite", ] [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.30" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d8d93354fe2a8e50d5953f5ae2e47a3fc2ef03292e7ea46e3cc38f549525fb9" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if 1.0.0", "log", @@ -2930,32 +3169,32 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.22" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.14.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", - "rand 0.8.4", + "rand 0.8.5", "sha-1", "thiserror", "url", @@ -2973,15 +3212,15 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "unicase" @@ -2994,30 +3233,30 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" + +[[package]] +name = "unicode-ident" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.2" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "universal-hash" @@ -3025,7 +3264,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.5", + "generic-array 0.14.6", "subtle", ] @@ -3037,13 +3276,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", "serde", ] @@ -3056,9 +3294,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "value-bag" -version = "1.0.0-alpha.8" +version = "1.0.0-alpha.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" +checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" dependencies = [ "ctor", "version_check", @@ -3105,9 +3343,9 @@ 
dependencies = [ [[package]] name = "warp" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" +checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" dependencies = [ "bytes", "futures-channel", @@ -3121,6 +3359,7 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", + "rustls-pemfile 0.2.1", "scoped-tls", "serde", "serde_json", @@ -3145,11 +3384,17 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3157,24 +3402,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" 
+checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3184,9 +3429,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3194,28 +3439,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -3233,22 +3478,13 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] 
-[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "winapi" version = "0.2.8" @@ -3292,11 +3528,92 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi 0.3.9", ] @@ -3313,6 +3630,6 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" \ No newline at end of file +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" diff --git a/Cargo.toml b/Cargo.toml index 7ee4dfeee4..0b7ce13203 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,26 +33,6 @@ path = "src/clarity_cli_main.rs" name = "blockstack-cli" path = "src/blockstack_cli.rs" -[[bin]] -name = "relay-server" -path = "contrib/tools/relay-server/src/main.rs" - -[[bench]] -name = "marf_bench" 
-harness = false - -[[bench]] -name = "large_contract_bench" -harness = false - -[[bench]] -name = "block_limits" -harness = false - -[[bench]] -name = "c32_bench" -harness = false - [dependencies] rand = "0.7.3" rand_chacha = "=0.2.2" @@ -91,7 +71,7 @@ version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] -version = "0.24.2" +version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] @@ -111,14 +91,14 @@ version = "0.2.23" features = ["std"] [dev-dependencies] -rstest = "0.11.0" -rstest_reuse = "0.1.3" assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" stx_genesis = { package = "stx-genesis", path = "./stx-genesis/."} clarity = { package = "clarity", features = ["default", "testing"], path = "./clarity/." } stacks_common = { package = "stacks-common", features = ["default", "testing"], path = "./stacks-common/." } +rstest = "0.17.0" +rstest_reuse = "0.5.0" [features] default = ["developer-mode"] @@ -129,8 +109,16 @@ monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks_common/slog_json", "clarity/slog_json"] testing = [] -[profile.dev.package.regex] -opt-level = 2 +# Use a bit more than default optimization for +# dev builds to speed up test execution +[profile.dev] +opt-level = 1 + +# Use release-level optimization for dependencies +# This slows down "first" builds on development environments, +# but won't impact subsequent builds. 
+[profile.dev.package."*"] +opt-level = 3 [target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } @@ -143,5 +131,4 @@ members = [ ".", "clarity", "stx-genesis", - "testnet/stacks-node", - "contrib/tools/relay-server"] + "testnet/stacks-node"] diff --git a/Dockerfile.stretch b/Dockerfile.debian similarity index 87% rename from Dockerfile.stretch rename to Dockerfile.debian index 7f5148dfec..4b9a56b8c5 100644 --- a/Dockerfile.stretch +++ b/Dockerfile.debian @@ -1,4 +1,4 @@ -FROM rust:stretch as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -14,7 +14,7 @@ RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json - RUN cp target/release/stacks-node /out -FROM debian:stretch-slim +FROM debian:bullseye-slim RUN apt update && apt install -y netcat COPY --from=build /out/ /bin/ diff --git a/README.md b/README.md index 9e70210e94..06fca3166c 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,8 @@ $ cargo nextest run You can observe the state machine in action locally by running: ```bash -$ cargo stacks-node start --config=./testnet/stacks-node/conf/testnet-follower-conf.toml +$ cd testnet/stacks-node +$ cargo run --bin stacks-node -- start --config=./conf/testnet-follower-conf.toml ``` _On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you are have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. 
This is due to the Clarity language currently being sensitive to line endings._ diff --git a/benches/block_limits.rs b/benches/block_limits.rs deleted file mode 100644 index 023be05f18..0000000000 --- a/benches/block_limits.rs +++ /dev/null @@ -1,677 +0,0 @@ -extern crate blockstack_lib; -extern crate rand; -extern crate serde_json; - -use std::fs; -use std::path::PathBuf; -use std::process; -use std::{env, time::Instant}; - -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use blockstack_lib::clarity_vm::database::marf::MarfedKV; -use blockstack_lib::clarity_vm::database::MemoryBackingStore; -use blockstack_lib::core::StacksEpochId; -use blockstack_lib::types::chainstate::BlockHeaderHash; -use blockstack_lib::types::chainstate::BurnchainHeaderHash; -use blockstack_lib::types::chainstate::VRFSeed; -use blockstack_lib::types::chainstate::{StacksAddress, StacksBlockId}; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use blockstack_lib::util::boot::boot_code_id; -use blockstack_lib::vm::ast::build_ast; -use blockstack_lib::vm::contexts::GlobalContext; -use blockstack_lib::vm::costs::LimitedCostTracker; -use blockstack_lib::vm::errors::InterpreterResult; -use blockstack_lib::vm::{eval_all, ContractContext}; -use blockstack_lib::{ - vm::costs::ExecutionCost, - vm::{ - database::{HeadersDB, NULL_BURN_STATE_DB}, - types::{PrincipalData, QualifiedContractIdentifier, StandardPrincipalData}, - Value, - }, -}; -use rand::Rng; - -struct TestHeadersDB; - -impl HeadersDB for TestHeadersDB { - fn get_stacks_block_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - Some(BlockHeaderHash(id_bhh.0.clone())) - } - - fn get_burn_header_hash_for_block( - &self, - id_bhh: &StacksBlockId, - ) -> Option { - Some(BurnchainHeaderHash(id_bhh.0.clone())) - } - - fn get_vrf_seed_for_block(&self, _id_bhh: &StacksBlockId) -> Option { - Some(VRFSeed([0; 32])) - } - - fn get_burn_block_time_for_block(&self, _id_bhh: &StacksBlockId) -> Option { - Some(1) - } 
- - fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { - if id_bhh == &StacksBlockId::sentinel() { - Some(0) - } else { - let mut bytes = [0; 4]; - bytes.copy_from_slice(&id_bhh.0[0..4]); - let height = u32::from_le_bytes(bytes); - Some(height) - } - } - - fn get_miner_address(&self, _id_bhh: &StacksBlockId) -> Option { - None - } -} - -fn as_hash160(inp: u32) -> [u8; 20] { - let mut out = [0; 20]; - out[0..4].copy_from_slice(&inp.to_le_bytes()); - out -} - -fn as_hash(inp: u32) -> [u8; 32] { - let mut out = [0; 32]; - out[0..4].copy_from_slice(&inp.to_le_bytes()); - out -} - -fn transfer_test(buildup_count: u32, scaling: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let principals: Vec = (0..(buildup_count - 1)) - .into_iter() - .map(|i| StandardPrincipalData(0, as_hash160(i)).into()) - .collect(); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&principals[ix - 1]); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - Ok(()) - }) - .unwrap() - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // transfer phase - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - let mut 
rng = rand::thread_rng(); - for _i in 0..scaling { - let from = rng.gen_range(0, principals.len()); - let to = (from + rng.gen_range(1, principals.len())) % principals.len(); - - conn.as_transaction(|tx| { - tx.run_stx_transfer(&principals[from], &principals[to], 10, &BuffData::empty()) - .unwrap() - }); - } - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "{} transfers in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn setup_chain_state(scaling: u32) -> MarfedKV { - let pre_initialized_path = format!("/tmp/block_limit_bench_{}.marf", scaling); - let out_path = "/tmp/block_limit_bench_last.marf"; - - if fs::metadata(&pre_initialized_path).is_err() { - let marf = MarfedKV::open(&pre_initialized_path, None).unwrap(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let mut conn = clarity_instance.begin_test_genesis_block( - &StacksBlockId::sentinel(), - &StacksBlockId(as_hash(0)), - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - conn.as_transaction(|tx| { - for j in 0..scaling { - tx.with_clarity_db(|db| { - let addr = StandardPrincipalData(0, as_hash160(j + 1)).into(); - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&addr); - stx_account_0.credit(1); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1).unwrap(); - Ok(()) - }) - .unwrap(); - } - }); - - conn.commit_to_block(&StacksBlockId(as_hash(0))); - }; - - if fs::metadata(&out_path).is_err() { - let path = PathBuf::from(out_path); - fs::create_dir_all(&path).expect("Error creating directory"); - } - - fs::copy( - &format!("{}/marf.sqlite", pre_initialized_path), - &format!("{}/marf.sqlite", out_path), - ) - .unwrap(); - return MarfedKV::open(out_path, None).unwrap(); -} - -fn test_via_raw_contract( - eval: &str, - scaling: u32, - buildup_count: u32, - genesis_size: u32, -) -> ExecutionCost { - 
let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let mut smart_contract = "".to_string(); - for _i in 0..scaling { - smart_contract.push_str(&format!("{}\n", eval)); - } - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - let exec_cost = conn.as_transaction(|tx| { - let analysis_cost = tx.cost_so_far(); - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, &smart_contract) - .unwrap(); - tx.initialize_smart_contract( - &contract_id, - &contract_ast, - &smart_contract, - None, - |_, _| false, - ) - .unwrap(); - - let mut initialize_cost = tx.cost_so_far(); - initialize_cost.sub(&analysis_cost).unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - 
.expect("FATAL: failed to store contract analysis"); - - initialize_cost - }); - - let _this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed raw execution scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - exec_cost -} - -fn smart_contract_test(scaling: u32, buildup_count: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let mut smart_contract = "".to_string(); - for i in 0..scaling { - smart_contract.push_str(&format!("(define-public (foo-{}) (ok (+ u2 u3)))\n", i)); - } - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - 
&NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, &smart_contract) - .unwrap(); - tx.initialize_smart_contract( - &contract_id, - &contract_ast, - &smart_contract, - None, - |_, _| false, - ) - .unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - .expect("FATAL: failed to store contract analysis"); - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed smart-contract scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn expensive_contract_test(scaling: u32, buildup_count: u32, genesis_size: u32) -> ExecutionCost { - let start = Instant::now(); - - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stacker: PrincipalData = StandardPrincipalData(0, as_hash160(0)).into(); - - let contract_id = - QualifiedContractIdentifier::new(StandardPrincipalData(0, as_hash160(0)), "test".into()); - - let smart_contract = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..scaling) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - contract_id.clone(), - contract_id.clone() - )) - .collect::>() - .join(" ") - ); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - let mut stx_account_0 = db.get_stx_balance_snapshot_genesis(&stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // execute the block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - let (contract_ast, contract_analysis) = tx - .analyze_smart_contract(&contract_id, &smart_contract) - .unwrap(); - tx.initialize_smart_contract(&contract_id, &contract_ast, &smart_contract, |_, _| false) - .unwrap(); - - tx.save_analysis(&contract_id, &contract_analysis) - .expect("FATAL: failed to store contract analysis"); - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed smart-contract scaled at {} in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -pub fn execute_in_epoch(program: &str, epoch: StacksEpochId) -> InterpreterResult> { - let contract_id = QualifiedContractIdentifier::transient(); - let mut contract_context = 
ContractContext::new(contract_id.clone()); - let mut marf = MemoryBackingStore::new(); - let conn = marf.as_clarity_db(); - let mut global_context = GlobalContext::new(false, conn, LimitedCostTracker::new_free(), epoch); - global_context.execute(|g| { - let parsed = build_ast(&contract_id, program, &mut ())?.expressions; - eval_all(&parsed, &mut contract_context, g) - }) -} - -fn execute(program: &str) -> InterpreterResult> { - let epoch_200_result = execute_in_epoch(program, StacksEpochId::Epoch20); - let epoch_205_result = execute_in_epoch(program, StacksEpochId::Epoch2_05); - assert_eq!( - epoch_200_result, epoch_205_result, - "Epoch 2.0 and 2.05 should have same execution result, but did not for program `{}`", - program - ); - epoch_205_result -} - -fn stack_stx_test(buildup_count: u32, genesis_size: u32, scaling: u32) -> ExecutionCost { - let start = Instant::now(); - let marf = setup_chain_state(genesis_size); - - let mut clarity_instance = ClarityInstance::new(false, marf); - let blocks: Vec<_> = (0..(buildup_count + 1)) - .into_iter() - .map(|i| StacksBlockId(as_hash(i))) - .collect(); - - let stackers: Vec = (0..scaling) - .into_iter() - .map(|i| StandardPrincipalData(0, as_hash160(i)).into()) - .collect(); - - let stacker_balance = (buildup_count as u128 - 1) * 1_000_000; - - let pox_addrs: Vec = (0..50u64) - .map(|ix| { - execute(&format!( - "{{ version: 0x00, hashbytes: 0x000000000000000000000000{} }}", - &blockstack_lib::util::hash::to_hex(&ix.to_le_bytes()) - )) - .unwrap() - .unwrap() - }) - .collect(); - - let last_mint_block = blocks.len() - 2; - let last_block = blocks.len() - 1; - - for ix in 1..(last_mint_block + 1) { - let parent_block = &blocks[ix - 1]; - let current_block = &blocks[ix]; - - let mut conn = clarity_instance.begin_block( - parent_block, - current_block, - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - // minting phase - conn.as_transaction(|tx| { - tx.with_clarity_db(|db| { - for stacker in stackers.iter() { - let mut 
stx_account_0 = db.get_stx_balance_snapshot_genesis(stacker); - stx_account_0.credit(1_000_000); - stx_account_0.save(); - db.increment_ustx_liquid_supply(1_000_000).unwrap(); - } - Ok(()) - }) - .unwrap(); - }); - - conn.commit_to_block(current_block); - } - - eprintln!("Finished buildup in {}ms", start.elapsed().as_millis()); - - // do the stack-stx block - let mut conn = clarity_instance.begin_block( - &blocks[last_mint_block], - &blocks[last_block], - &TestHeadersDB, - &NULL_BURN_STATE_DB, - ); - - let begin = Instant::now(); - - conn.as_transaction(|tx| { - for stacker in stackers.iter() { - let result = tx - .run_contract_call( - stacker, - None, - &boot_code_id("pox", false), - "stack-stx", - &[ - Value::UInt(stacker_balance), - pox_addrs[0].clone(), - Value::UInt(buildup_count as u128 + 2), - Value::UInt(12), - ], - |_, _| false, - ) - .unwrap() - .0; - if let Err(v) = result.expect_result() { - panic!("Stacking failed: {}", v); - } - } - }); - - let this_cost = conn.commit_to_block(&blocks[last_block]).get_total(); - let elapsed = begin.elapsed(); - - println!( - "Completed {} stack-stx ops in {} ms, after {} block buildup with a {} account genesis", - scaling, - elapsed.as_millis(), - buildup_count, - genesis_size, - ); - - this_cost -} - -fn main() { - let argv: Vec<_> = env::args().collect(); - - if argv.len() < 3 { - eprintln!( - "Usage: {} [test-name] [scalar-0] ... [scalar-n] - -transfer -smart-contract -stack-stx -clarity-transfer -clarity-verify -clarity-raw -", - argv[0] - ); - process::exit(1); - } - - let block_build_up = argv[2].parse().expect("Invalid scalar"); - let genesis_size = argv[3].parse().expect("Invalid scalar"); - let scaling = argv[4].parse().expect("Invalid scalar"); - - let result = match argv[1].as_str() { - "transfer" => transfer_test(block_build_up, scaling, genesis_size), - "smart-contract" => smart_contract_test(scaling, block_build_up, genesis_size), - "clarity-transfer" => test_via_raw_contract(r#"(stx-transfer? 
u1 tx-sender 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)"#, - scaling, block_build_up, genesis_size), - "expensive-contract" => expensive_contract_test(scaling, block_build_up, genesis_size), - "clarity-verify" => test_via_raw_contract("(secp256k1-verify 0xde5b9eb9e7c5592930eb2e30a01369c36586d872082ed8181ee83d2a0ec20f04 - 0x8738487ebe69b93d8e51583be8eee50bb4213fc49c767d329632730cc193b873554428fc936ca3569afc15f1c9365f6591d6251a89fee9c9ac661116824d3a1301 - 0x03adb8de4bfb65db2cfd6120d55c6526ae9c52e675db7e47308636534ba7786110)", - scaling, block_build_up, genesis_size), - "stack-stx" => stack_stx_test(block_build_up, genesis_size, scaling), - _ => { - eprintln!("bad test name"); - process::exit(1); - } - }; - - println!("{}", serde_json::to_string(&result).unwrap()); -} diff --git a/benches/c32_bench.rs b/benches/c32_bench.rs deleted file mode 100644 index 3fc4a93381..0000000000 --- a/benches/c32_bench.rs +++ /dev/null @@ -1,35 +0,0 @@ -extern crate blockstack_lib; -extern crate criterion; -extern crate rand; - -use blockstack_lib::address::c32::{c32_address, c32_address_decode}; -use blockstack_lib::address::c32_old::c32_address_decode as c32_address_decode_old; -use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; -use rand::Rng; - -fn bench_c32_decoding(c: &mut Criterion) { - let mut group = c.benchmark_group("C32 Decoding"); - - let mut addrs: Vec = vec![]; - for _ in 0..5 { - // random version - let random_version: u8 = rand::thread_rng().gen_range(0, 31); - // random 20 bytes - let random_bytes = rand::thread_rng().gen::<[u8; 20]>(); - let addr = c32_address(random_version, &random_bytes).unwrap(); - addrs.push(addr); - } - - for addr in addrs.iter() { - group.bench_with_input(BenchmarkId::new("Legacy", addr), addr, |b, i| { - b.iter(|| c32_address_decode_old(i)) - }); - group.bench_with_input(BenchmarkId::new("Updated", addr), addr, |b, i| { - b.iter(|| c32_address_decode(i)) - }); - } - group.finish(); -} - -criterion_group!(benches, 
bench_c32_decoding); -criterion_main!(benches); diff --git a/benches/large_contract_bench.rs b/benches/large_contract_bench.rs deleted file mode 100644 index 0e57793f2e..0000000000 --- a/benches/large_contract_bench.rs +++ /dev/null @@ -1,158 +0,0 @@ -#[macro_use] -extern crate criterion; -extern crate blockstack_lib; -extern crate rand; - -use blockstack_lib::clarity_vm::clarity::ClarityInstance; -use blockstack_lib::clarity_vm::database::marf::MarfedKV; -use blockstack_lib::types::chainstate::StacksBlockId; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use blockstack_lib::vm::database::NULL_BURN_STATE_DB; -use blockstack_lib::{vm::database::NULL_HEADER_DB, vm::types::QualifiedContractIdentifier}; -use criterion::Criterion; - -pub fn rollback_log_memory_test() { - let marf = MarfedKV::temporary(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let EXPLODE_N = 100; - - let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); - - { - let mut conn = clarity_instance.begin_block( - &StacksBlockId::sentinel(), - &StacksBlockId::from_bytes(&[0 as u8; 32]).unwrap(), - &NULL_HEADER_DB, - &NULL_BURN_STATE_DB, - ); - - let define_data_var = "(define-data-var XZ (buff 1048576) \"a\")"; - - let mut contract = define_data_var.to_string(); - for i in 0..20 { - let cur_size = format!("{}", 2u32.pow(i)); - contract.push_str("\n"); - contract.push_str(&format!( - "(var-set XZ (concat (unwrap-panic (as-max-len? (var-get XZ) u{})) - (unwrap-panic (as-max-len? 
(var-get XZ) u{}))))", - cur_size, cur_size - )); - } - for i in 0..EXPLODE_N { - let exploder = format!("(define-data-var var-{} (buff 1048576) (var-get XZ))", i); - contract.push_str("\n"); - contract.push_str(&exploder); - } - - conn.as_transaction(|conn| { - let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - - assert!(format!( - "{:?}", - conn.initialize_smart_contract(&contract_identifier, &ct_ast, &contract, |_, _| { - false - }) - .unwrap_err() - ) - .contains("MemoryBalanceExceeded")); - }); - } -} - -pub fn ccall_memory_test() { - let marf = MarfedKV::temporary(); - let mut clarity_instance = ClarityInstance::new(false, marf); - let COUNT_PER_CONTRACT = 20; - let CONTRACTS = 5; - - { - let mut conn = clarity_instance.begin_block( - &StacksBlockId::sentinel(), - &StacksBlockId::from_bytes(&[0 as u8; 32]).unwrap(), - &NULL_HEADER_DB, - &NULL_BURN_STATE_DB, - ); - - let define_data_var = "(define-constant buff-0 \"a\")\n"; - - let mut contract = define_data_var.to_string(); - for i in 0..20 { - contract.push_str(&format!( - "(define-constant buff-{} (concat buff-{} buff-{}))\n", - i + 1, - i, - i - )); - } - - for i in 0..COUNT_PER_CONTRACT { - contract.push_str(&format!("(define-constant var-{} buff-20)\n", i)); - } - - contract.push_str("(define-public (call)\n"); - - let mut contracts = vec![]; - - for i in 0..CONTRACTS { - let mut my_contract = contract.clone(); - if i == 0 { - my_contract.push_str("(ok 1))\n"); - } else { - my_contract.push_str(&format!("(contract-call? 
.contract-{} call))\n", i - 1)); - } - my_contract.push_str("(call)\n"); - contracts.push(my_contract); - } - - for (i, contract) in contracts.into_iter().enumerate() { - let contract_name = format!("contract-{}", i); - let contract_identifier = QualifiedContractIdentifier::local(&contract_name).unwrap(); - - if i < (CONTRACTS - 1) { - conn.as_transaction(|conn| { - let (ct_ast, ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - conn.initialize_smart_contract( - &contract_identifier, - &ct_ast, - &contract, - |_, _| false, - ) - .unwrap(); - conn.save_analysis(&contract_identifier, &ct_analysis) - .unwrap(); - }) - } else { - conn.as_transaction(|conn| { - let (ct_ast, _ct_analysis) = conn - .analyze_smart_contract(&contract_identifier, &contract) - .unwrap(); - assert!(format!( - "{:?}", - conn.initialize_smart_contract( - &contract_identifier, - &ct_ast, - &contract, - |_, _| false - ) - .unwrap_err() - ) - .contains("MemoryBalanceExceeded")); - }) - } - } - } -} - -pub fn basic_usage_benchmark(c: &mut Criterion) { - c.bench_function("rollback_log_memory_test", |b| { - b.iter(|| rollback_log_memory_test()) - }); - c.bench_function("ccall_memory_test", |b| b.iter(|| ccall_memory_test())); -} - -criterion_group!(benches, basic_usage_benchmark); -criterion_main!(benches); diff --git a/benches/marf_bench.rs b/benches/marf_bench.rs deleted file mode 100644 index 951ee07109..0000000000 --- a/benches/marf_bench.rs +++ /dev/null @@ -1,136 +0,0 @@ -#[macro_use] -extern crate criterion; -extern crate blockstack_lib; -extern crate rand; - -use std::fs; - -use blockstack_lib::chainstate::stacks::index::{marf::MARF, storage::TrieFileStorage}; -use blockstack_lib::chainstate::stacks::Error; -use blockstack_lib::types::chainstate::{MARFValue, StacksBlockId}; -use blockstack_lib::types::proof::ClarityMarfTrieId; -use criterion::Criterion; -use rand::prelude::*; - -pub fn begin( - marf: &mut MARF, - chain_tip: &StacksBlockId, - 
next_chain_tip: &StacksBlockId, -) -> Result<(), Error> { - let mut tx = marf.begin_tx()?; - tx.begin(chain_tip, next_chain_tip)?; - Ok(()) -} - -fn benchmark_marf_usage( - filename: &str, - blocks: u32, - writes_per_block: u32, - reads_per_block: u32, - batch: bool, -) { - if fs::metadata(filename).is_ok() { - fs::remove_file(filename).unwrap(); - }; - let f = TrieFileStorage::open(filename).unwrap(); - let mut block_header = StacksBlockId::from_bytes(&[0u8; 32]).unwrap(); - let mut marf = MARF::from_storage(f); - - begin(&mut marf, &StacksBlockId::sentinel(), &block_header).unwrap(); - - let mut rng = rand::thread_rng(); - - let mut values = vec![]; - - for i in 0..blocks { - if batch { - let mut batch_keys = Vec::new(); - let mut batch_vals = Vec::new(); - for k in 0..writes_per_block { - let key = format!("{}::{}", i, k); - let mut value = [0u8; 40]; - rng.fill_bytes(&mut value); - batch_keys.push(key.clone()); - batch_vals.push(MARFValue(value.clone())); - values.push((key, MARFValue(value))); - } - marf.insert_batch(&batch_keys, batch_vals).unwrap(); - } else { - for k in 0..writes_per_block { - let key = format!("{}::{}", i, k); - let mut value = [0u8; 40]; - rng.fill_bytes(&mut value); - marf.insert(&key, MARFValue(value.clone())).unwrap(); - values.push((key, MARFValue(value))); - } - } - - for _k in 0..reads_per_block { - let (key, value) = values.as_slice().choose(&mut rng).unwrap(); - assert_eq!( - marf.get_with_proof(&block_header, key).unwrap().unwrap().0, - *value - ); - } - - let mut next_block_header = (i + 1).to_le_bytes().to_vec(); - next_block_header.resize(32, 0); - let next_block_header = StacksBlockId::from_bytes(next_block_header.as_slice()).unwrap(); - - marf.commit().unwrap(); - begin(&mut marf, &block_header, &next_block_header).unwrap(); - block_header = next_block_header; - } - marf.commit().unwrap(); -} - -fn benchmark_marf_read(filename: &str, reads: u32, block: u32, writes_per_block: u32) { - let f = 
TrieFileStorage::open(filename).unwrap(); - let mut block_header = block.to_le_bytes().to_vec(); - block_header.resize(32, 0); - let block_header = StacksBlockId::from_bytes(block_header.as_slice()).unwrap(); - - let mut marf = MARF::from_storage(f); - - let mut rng = rand::thread_rng(); - - for _i in 0..reads { - let i: u32 = rng.gen_range(0, block); - let k: u32 = rng.gen_range(0, writes_per_block); - let key = format!("{}::{}", i, k); - marf.get_with_proof(&block_header, &key).unwrap().unwrap().0; - } -} - -pub fn basic_usage_benchmark(c: &mut Criterion) { - c.bench_function("marf_setup_1000b_5kW", |b| { - b.iter(|| benchmark_marf_usage("/tmp/db.1k.sqlite", 1000, 5000, 0, false)) - }); - c.bench_function("marf_setup_400b_5kW", |b| { - b.iter(|| benchmark_marf_usage("/tmp/db.400.sqlite", 1000, 5000, 0, false)) - }); - c.bench_function("marf_read_1000b_1kW", |b| { - b.iter(|| benchmark_marf_read("/tmp/db.1k.sqlite", 1000, 1000, 5000)) - }); - c.bench_function("marf_read_400b_1kW", |b| { - b.iter(|| benchmark_marf_read("/tmp/db.400.sqlite", 1000, 400, 5000)) - }); - - c.bench_function("marf_usage_1b_10kW_0kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 1, 10000, 0, false)) - }); - c.bench_function("marf_usage_10b_1kW_2kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 10, 1000, 2000, false)) - }); - c.bench_function("marf_usage_100b_5kW_20kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 20, 5000, 20000, false)) - }); - c.bench_function("marf_usage_batches_10b_1kW_2kR", |b| { - b.iter(|| benchmark_marf_usage("/tmp/foo.bar.z.sqlite", 10, 1000, 2000, true)) - }); -} - -pub fn scaling_read_ratio(_c: &mut Criterion) {} - -criterion_group!(benches, basic_usage_benchmark); -criterion_main!(benches); diff --git a/build-scripts/Dockerfile.linux-arm64 b/build-scripts/Dockerfile.linux-arm64 deleted file mode 100644 index 7acc30f6bf..0000000000 --- a/build-scripts/Dockerfile.linux-arm64 +++ /dev/null @@ -1,23 +0,0 @@ 
-FROM rust:stretch as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . - -RUN rustup target add aarch64-unknown-linux-gnu - -RUN apt-get update && apt-get install -y git gcc-aarch64-linux-gnu - -RUN CC=aarch64-linux-gnu-gcc \ - CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ - cargo build --release --workspace --target aarch64-unknown-linux-gnu - -RUN mkdir /out && cp -R /src/target/aarch64-unknown-linux-gnu/release/. /out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-armv7 b/build-scripts/Dockerfile.linux-armv7 deleted file mode 100644 index 9fb50d18bc..0000000000 --- a/build-scripts/Dockerfile.linux-armv7 +++ /dev/null @@ -1,23 +0,0 @@ -FROM rust:stretch as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . - -RUN rustup target add armv7-unknown-linux-gnueabihf - -RUN apt-get update && apt-get install -y git gcc-arm-linux-gnueabihf - -RUN CC=arm-linux-gnueabihf-gcc \ - CC_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - cargo build --release --workspace --target armv7-unknown-linux-gnueabihf - -RUN mkdir /out && cp -R /src/target/armv7-unknown-linux-gnueabihf/release/. 
/out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.linux-glibc-arm64 b/build-scripts/Dockerfile.linux-glibc-arm64 new file mode 100644 index 0000000000..7ce50b6a68 --- /dev/null +++ b/build-scripts/Dockerfile.linux-glibc-arm64 @@ -0,0 +1,26 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=aarch64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git gcc-aarch64-linux-gnu + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && CC=aarch64-linux-gnu-gcc \ + CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-glibc-x64 b/build-scripts/Dockerfile.linux-glibc-x64 new file mode 100644 index 0000000000..2db13cb51e --- /dev/null +++ b/build-scripts/Dockerfile.linux-glibc-x64 @@ -0,0 +1,23 @@ +FROM rust:bullseye as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-gnu +WORKDIR /src + +COPY . . + +RUN apt-get update && apt-get install -y git + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. 
${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-musl-arm64 b/build-scripts/Dockerfile.linux-musl-arm64 new file mode 100644 index 0000000000..135e6f9fc9 --- /dev/null +++ b/build-scripts/Dockerfile.linux-musl-arm64 @@ -0,0 +1,21 @@ +FROM messense/rust-musl-cross:aarch64-musl as build + +ARG STACKS_NODE_VERSION="No Version Info" +ARG GIT_BRANCH='No Branch Info' +ARG GIT_COMMIT='No Commit Info' +ARG BUILD_DIR=/build +ARG TARGET=aarch64-unknown-linux-musl +WORKDIR /src + +COPY . . + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out + +FROM scratch AS export-stage +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64 index 9c6c604341..73e64b4d67 100644 --- a/build-scripts/Dockerfile.linux-musl-x64 +++ b/build-scripts/Dockerfile.linux-musl-x64 @@ -1,23 +1,23 @@ -FROM rust:stretch as build +FROM rust:alpine as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG TARGET=x86_64-unknown-linux-musl WORKDIR /src COPY . . 
-RUN rustup target add x86_64-unknown-linux-musl - -RUN apt-get update && apt-get install -y git musl-tools - -RUN CC=musl-gcc \ - CC_x86_64_unknown_linux_musl=musl-gcc \ - CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=musl-gcc \ - cargo build --release --workspace --target x86_64-unknown-linux-musl +RUN apk update && apk add git musl-dev -RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-musl/release/. /out +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.linux-x64 b/build-scripts/Dockerfile.linux-x64 deleted file mode 100644 index b4abb08aed..0000000000 --- a/build-scripts/Dockerfile.linux-x64 +++ /dev/null @@ -1,20 +0,0 @@ -FROM rust:stretch as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' - -WORKDIR /src - -COPY . . - -RUN apt-get update && apt-get install -y git - -RUN rustup target add x86_64-unknown-linux-gnu - -RUN cargo build --release --workspace --target x86_64-unknown-linux-gnu - -RUN mkdir /out && cp -R /src/target/x86_64-unknown-linux-gnu/release/. 
/out - -FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64 index 56cfe684a3..d6b80f267a 100644 --- a/build-scripts/Dockerfile.macos-arm64 +++ b/build-scripts/Dockerfile.macos-arm64 @@ -3,22 +3,27 @@ FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" +ARG TARGET=aarch64-apple-darwin WORKDIR /src COPY . . -RUN rustup target add aarch64-apple-darwin - RUN apt-get update && apt-get install -y clang zstd -RUN wget -nc -O /tmp/osxcross.tar.zst "https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" -RUN mkdir /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross - -RUN . /opt/osxcross/env-macos-aarch64 && \ - cargo build --target aarch64-apple-darwin --release --workspace - -RUN mkdir /out && cp -R /src/target/aarch64-apple-darwin/release/. /out +# Retrieve and install osxcross +RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \ + && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross + +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && . /opt/osxcross/env-macos-aarch64 \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64 index 29038b6967..5403b2fe87 100644 --- a/build-scripts/Dockerfile.macos-x64 +++ b/build-scripts/Dockerfile.macos-x64 @@ -3,22 +3,27 @@ FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" +ARG TARGET=x86_64-apple-darwin WORKDIR /src COPY . . -RUN rustup target add x86_64-apple-darwin - RUN apt-get update && apt-get install -y clang zstd -RUN wget -nc -O /tmp/osxcross.tar.zst "https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst" -RUN mkdir /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross - -RUN . /opt/osxcross/env-macos-x86_64 && \ - cargo build --target x86_64-apple-darwin --release --workspace +# Retrieve and install osxcross +RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \ + && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross -RUN mkdir /out && cp -R /src/target/x86_64-apple-darwin/release/. /out +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && . /opt/osxcross/env-macos-x86_64 \ + && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage -COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / \ No newline at end of file +COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node / diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64 index 58785ccba7..c3ffcd5d29 100644 --- a/build-scripts/Dockerfile.windows-x64 +++ b/build-scripts/Dockerfile.windows-x64 @@ -1,22 +1,25 @@ -FROM rust:stretch as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' - +ARG BUILD_DIR=/build +ARG TARGET=x86_64-pc-windows-gnu WORKDIR /src COPY . . -RUN rustup target add x86_64-pc-windows-gnu - RUN apt-get update && apt-get install -y git gcc-mingw-w64-x86-64 -RUN CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ +# Run all the build steps in ramdisk in an attempt to speed things up +RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ + && cd ${BUILD_DIR} \ + && rustup target add ${TARGET} \ + && CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \ CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc \ - cargo build --release --workspace --target x86_64-pc-windows-gnu - -RUN mkdir /out && cp -R /src/target/x86_64-pc-windows-gnu/release/. /out + cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && mkdir -p /out \ + && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out FROM scratch AS export-stage COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe / \ No newline at end of file diff --git a/build-scripts/build-dist.sh b/build-scripts/build-dist.sh index 760d0ea613..ac2c8bcd5c 100755 --- a/build-scripts/build-dist.sh +++ b/build-scripts/build-dist.sh @@ -15,13 +15,13 @@ build_platform () { case $DIST_TARGET_FILTER in (*[![:blank:]]*) case $DIST_TARGET_FILTER in - linux-x64) build_platform linux-x64 ;; - linux-musl-x64) build_platform linux-musl-x64 ;; - linux-armv7) build_platform linux-armv7 ;; - linux-arm64) build_platform linux-arm64 ;; - windows-x64) build_platform windows-x64 ;; - macos-x64) build_platform macos-x64 ;; - macos-arm64) build_platform macos-arm64 ;; + linux-glibc-x64) build_platform linux-glibc-x64 ;; + linux-glibc-arm64) build_platform linux-glibc-arm64 ;; + linux-musl-x64) build_platform linux-musl-x64 ;; + linux-musl-arm64) build_platform linux-musl-arm64 ;; + windows-x64) build_platform windows-x64 ;; + macos-x64) build_platform macos-x64 ;; + macos-arm64) build_platform macos-arm64 ;; *) echo "Invalid dist target filter '$DIST_TARGET_FILTER'" exit 1 @@ -30,10 +30,10 @@ case $DIST_TARGET_FILTER in ;; (*) echo "Building distrubtions for all targets." 
- build_platform linux-x64 + build_platform linux-glibc-x64 + build_platform linux-glibc-arm64 build_platform linux-musl-x64 - build_platform linux-armv7 - build_platform linux-arm64 + build_platform linux-musl-arm64 build_platform windows-x64 build_platform macos-x64 build_platform macos-arm64 diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 131712a04a..0000000000 --- a/circle.yml +++ /dev/null @@ -1,76 +0,0 @@ -version: 2.1 -executors: - docker-publisher: - docker: - - image: circleci/buildpack-deps:stretch -jobs: - test_demo: - working_directory: /test - docker: - - image: rust:1.45-stretch - steps: - - checkout - - run: - command: | - cargo build - - run: - command: | - ./target/debug/stacks-inspect local initialize db && - ./target/debug/stacks-inspect local check sample-contracts/tokens.clar db && - ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.tokens sample-contracts/tokens.clar db && - ./target/debug/stacks-inspect local check sample-contracts/names.clar db && - ./target/debug/stacks-inspect local launch S1G2081040G2081040G2081040G208105NK8PE5.names sample-contracts/names.clar db && - ./target/debug/stacks-inspect local execute db S1G2081040G2081040G2081040G208105NK8PE5.tokens mint! 
SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR u100000 - - run: - command: | - echo "(get-balance 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR)" | ./target/debug/stacks-inspect local eval S1G2081040G2081040G2081040G208105NK8PE5.tokens db - unit_tests_with_cov: - machine: true - working_directory: ~/blockstack - steps: - - checkout - - run: - name: Coverage via tarpaulin - command: | - docker run --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin \ - bash -c "cargo tarpaulin -v --workspace -t 300 -o Xml" - no_output_timeout: 200m - - run: - name: Upload to codecov.io - command: | - bash <(curl -s https://codecov.io/bash) - unit_tests: - docker: - - image: rust:1.40-stretch - working_directory: ~/blockstack - steps: - - checkout - - run: - no_output_timeout: 200m - command: | - cargo test -j 1 --workspace --exclude clarity - all_tests: - docker: - - image: rust:1.40-stretch - working_directory: ~/blockstack - steps: - - checkout - - run: - no_output_timeout: 200m - command: | - cargo test --workspace && cargo test -- --ignored --test-threads 1 -workflows: - version: 2 - test: - jobs: - - unit_tests - - test_demo -# disable `all_tests` for now, because the circle builder -# OOMs on compile... -# - all_tests: -# filters: -# branches: -# only: -# - master -# - /.*net.*/ -# - /.*marf.*/ diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index ce4fd7146e..b812ef735a 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -28,8 +28,8 @@ lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } stacks_common = { package = "stacks-common", path = "../stacks-common/." 
} -rstest = "0.11.0" -rstest_reuse = "0.1.3" +rstest = "0.17.0" +rstest_reuse = "0.5.0" [dependencies.serde_json] version = "1.0" @@ -44,8 +44,6 @@ version = "0.2.23" features = ["std"] [dev-dependencies] -rstest = "0.11.0" -rstest_reuse = "0.1.3" assert-json-diff = "1.0.0" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs index 2d0685bd76..903caa8503 100644 --- a/clarity/src/libclarity.rs +++ b/clarity/src/libclarity.rs @@ -33,7 +33,7 @@ extern crate rstest; #[cfg(any(test, feature = "testing"))] #[macro_use] -extern crate rstest_reuse; +pub extern crate rstest_reuse; #[macro_use] extern crate stacks_common; diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 54bafe51dd..819e1e86b7 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -29,21 +29,11 @@ use crate::vm::ast::parse; use crate::vm::costs::LimitedCostTracker; use crate::vm::functions::define::DefineFunctions; use crate::vm::functions::NativeFunctions; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::variables::NativeVariables; use crate::vm::ClarityVersion; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_arith_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - /// Checks whether or not a contract only contains arithmetic expressions (for example, defining a /// map would not pass this check). /// This check is useful in determining the validity of new potential cost functions. 
@@ -71,7 +61,7 @@ fn check_good(contract: &str, version: ClarityVersion, epoch: StacksEpochId) { ArithmeticOnlyChecker::run(&analysis).expect("Should pass arithmetic checks"); } -#[apply(test_clarity_versions_arith_checker)] +#[apply(test_clarity_versions)] fn test_bad_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = [ ("(define-public (foo) (ok 1))", DefineTypeForbidden(DefineFunctions::PublicFunction)), diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 31f90f1ee5..72c64fbed6 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -36,6 +36,7 @@ pub enum CheckErrors { ValueOutOfBounds, TypeSignatureTooDeep, ExpectedName, + SupertypeTooLarge, // match errors BadMatchOptionSyntax(Box), @@ -321,6 +322,7 @@ impl DiagnosableError for CheckErrors { fn message(&self) -> String { match &self { CheckErrors::ExpectedLiteral => "expected a literal argument".into(), + CheckErrors::SupertypeTooLarge => "supertype of two types is too large".into(), CheckErrors::BadMatchOptionSyntax(source) => format!("match on a optional type uses the following syntax: (match input some-name if-some-expression if-none-expression). 
Caused by: {}", source.message()), diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 24681e12bf..94883ce9c3 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -133,7 +133,12 @@ pub fn run_analysis( StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db) } - StacksEpochId::Epoch21 => TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { + TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) + } StacksEpochId::Epoch10 => unreachable!("Epoch 1.0 is not a valid epoch for analysis"), }?; TraitChecker::run_pass(&epoch, &mut contract_analysis, db)?; diff --git a/clarity/src/vm/analysis/read_only_checker/tests.rs b/clarity/src/vm/analysis/read_only_checker/tests.rs index f4532075fc..0a5044e0bd 100644 --- a/clarity/src/vm/analysis/read_only_checker/tests.rs +++ b/clarity/src/vm/analysis/read_only_checker/tests.rs @@ -25,20 +25,10 @@ use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; use crate::vm::analysis::{CheckError, CheckErrors}; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::QualifiedContractIdentifier; use crate::vm::ClarityVersion; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_read_only_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - #[test] fn test_argument_count_violations() { let examples = [ @@ -197,7 +187,7 @@ fn test_nested_writing_closure() { } } -#[apply(test_clarity_versions_read_only_checker)] +#[apply(test_clarity_versions)] fn test_contract_call_read_only_violations( #[case] 
version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/trait_checker/tests.rs b/clarity/src/vm/analysis/trait_checker/tests.rs index 0c348385e7..d720e0e720 100644 --- a/clarity/src/vm/analysis/trait_checker/tests.rs +++ b/clarity/src/vm/analysis/trait_checker/tests.rs @@ -26,21 +26,11 @@ use crate::vm::analysis::{type_check, CheckError}; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::{build_ast, parse}; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_trait_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -88,7 +78,7 @@ fn test_dynamic_dispatch_by_defining_trait( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_incomplete_impl_trait_1(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -114,7 +104,7 @@ fn test_incomplete_impl_trait_1(#[case] version: ClarityVersion, #[case] epoch: } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_incomplete_impl_trait_2(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -141,7 +131,7 @@ fn test_incomplete_impl_trait_2(#[case] version: ClarityVersion, #[case] epoch: } } 
-#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_1(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 10 uint)) (response uint uint))))"; @@ -165,7 +155,7 @@ fn test_impl_trait_arg_admission_1(#[case] version: ClarityVersion, #[case] epoc } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_2(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 5 uint)) (response uint uint))))"; @@ -184,7 +174,7 @@ fn test_impl_trait_arg_admission_2(#[case] version: ClarityVersion, #[case] epoc .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait_arg_admission_3(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 ((list 5 uint)) (response uint uint))))"; @@ -203,7 +193,7 @@ fn test_impl_trait_arg_admission_3(#[case] version: ClarityVersion, #[case] epoc .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_complete_impl_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) @@ -226,7 +216,7 @@ fn test_complete_impl_trait(#[case] version: ClarityVersion, #[case] epoch: Stac .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_complete_impl_trait_mixing_readonly( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -252,7 +242,7 @@ fn test_complete_impl_trait_mixing_readonly( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_get_trait_reference_from_tuple( #[case] version: ClarityVersion, #[case] epoch: 
StacksEpochId, @@ -305,7 +295,7 @@ fn test_get_trait_reference_from_tuple( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_and_impl_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -348,7 +338,7 @@ fn test_dynamic_dispatch_by_defining_and_impl_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_define_map_storing_trait_references( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -375,7 +365,7 @@ fn test_define_map_storing_trait_references( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_cycle_in_traits_1_contract(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let dispatching_contract_src = "(define-trait trait-1 ( (get-1 () (response uint uint)))) @@ -399,7 +389,7 @@ fn test_cycle_in_traits_1_contract(#[case] version: ClarityVersion, #[case] epoc } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_cycle_in_traits_2_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let dispatching_contract_src = "(use-trait trait-2 .target-contract.trait-2) (define-trait trait-1 ( @@ -450,7 +440,7 @@ fn test_cycle_in_traits_2_contracts(#[case] version: ClarityVersion, #[case] epo } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_unknown_method( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -503,7 +493,7 @@ fn test_dynamic_dispatch_unknown_method( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_nested_literal_implicitly_compliant( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -571,7 +561,7 @@ fn test_nested_literal_implicitly_compliant( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn 
test_passing_trait_reference_instances( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -609,7 +599,7 @@ fn test_passing_trait_reference_instances( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_passing_nested_trait_reference_instances( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -648,7 +638,7 @@ fn test_passing_nested_trait_reference_instances( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -687,7 +677,7 @@ fn test_dynamic_dispatch_collision_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_defined_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -716,7 +706,7 @@ fn test_dynamic_dispatch_collision_defined_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_collision_imported_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -756,7 +746,7 @@ fn test_dynamic_dispatch_collision_imported_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_importing_non_existant_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -828,7 +818,7 @@ fn test_dynamic_dispatch_importing_non_existant_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_importing_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -895,7 +885,7 @@ fn test_dynamic_dispatch_importing_trait( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_nested_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1002,7 +992,7 @@ fn 
test_dynamic_dispatch_including_nested_trait( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_wrong_nested_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1121,7 +1111,7 @@ fn test_dynamic_dispatch_including_wrong_nested_trait( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_args( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1175,7 +1165,7 @@ fn test_dynamic_dispatch_mismatched_args( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_returns( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1229,7 +1219,7 @@ fn test_dynamic_dispatch_mismatched_returns( } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_bad_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1265,7 +1255,7 @@ fn test_bad_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: Stac } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_good_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1298,7 +1288,7 @@ fn test_good_call_with_trait(#[case] version: ClarityVersion, #[case] epoch: Sta .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_good_call_2_with_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1332,7 +1322,7 @@ fn test_good_call_2_with_trait(#[case] version: ClarityVersion, #[case] epoch: S .unwrap(); } 
-#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1400,7 +1390,7 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1478,7 +1468,7 @@ fn test_dynamic_dispatch_pass_bound_principal_as_trait_in_user_defined_functions } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_contract_of_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1499,7 +1489,7 @@ fn test_contract_of_good(#[case] version: ClarityVersion, #[case] epoch: StacksE .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_contract_of_wrong_type(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; @@ -1628,7 +1618,7 @@ fn test_contract_of_wrong_type(#[case] version: ClarityVersion, #[case] epoch: S } } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1678,7 +1668,7 @@ fn test_return_trait_with_contract_of( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_begin( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1728,7 +1718,7 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( .unwrap(); } 
-#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_let( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1778,7 +1768,7 @@ fn test_return_trait_with_contract_of_wrapped_in_let( .unwrap(); } -#[apply(test_clarity_versions_trait_checker)] +#[apply(test_clarity_versions)] fn test_trait_contract_not_found(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let trait_contract_src = "(define-trait my-trait ((hello (int) (response uint uint))) diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index d1f87df625..f5848090d8 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -49,7 +49,10 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_2_05(accounting, args) } - StacksEpochId::Epoch21 => self.check_args_2_1(accounting, args, clarity_version), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), } } @@ -65,7 +68,10 @@ impl FunctionType { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { self.check_args_by_allowing_trait_cast_2_05(db, func_args) } - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index 5f841a167e..35fc90c872 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -15,7 +15,6 @@ // along with this program. 
If not, see . pub mod contexts; -//mod maps; pub mod natives; use std::collections::{BTreeMap, HashMap}; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs index bed41a305f..d39445255e 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/assets.rs @@ -21,13 +21,6 @@ use rstest_reuse::{self, *}; use super::contracts::type_check; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_assets(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - use std::convert::TryInto; use stacks_common::types::StacksEpochId; @@ -37,6 +30,7 @@ use crate::vm::analysis::type_checker::v2_1::tests::mem_type_check; use crate::vm::analysis::AnalysisDatabase; use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::{ QualifiedContractIdentifier, SequenceSubtype, StringSubtype, TypeSignature, }; @@ -121,7 +115,7 @@ const ASSET_NAMES: &str = "(define-constant burn-address 'SP00000000000000000000 (nft-burn? 
names name tx-sender)) "; -#[apply(test_clarity_versions_assets)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tokens_contract_id = QualifiedContractIdentifier::local("tokens").unwrap(); let names_contract_id = QualifiedContractIdentifier::local("names").unwrap(); diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 8b330130c5..668b77d153 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -28,6 +28,7 @@ use crate::vm::analysis::{mem_type_check as mem_run_analysis, run_analysis, Chec use crate::vm::ast::parse; use crate::vm::database::MemoryBackingStore; use crate::vm::errors::Error; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{ PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TypeSignature, @@ -43,13 +44,6 @@ fn mem_type_check_v1(snippet: &str) -> CheckResult<(Option, Contr mem_run_analysis(snippet, ClarityVersion::Clarity1, StacksEpochId::latest()) } -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - #[template] #[rstest] #[case(ClarityVersion::Clarity1)] @@ -445,7 +439,7 @@ fn test_names_tokens_contracts_interface() { assert_json_eq!(test_contract_json, test_contract_json_expected); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tokens_contract_id = QualifiedContractIdentifier::local("tokens").unwrap(); let names_contract_id = 
QualifiedContractIdentifier::local("names").unwrap(); @@ -462,7 +456,7 @@ fn test_names_tokens_contracts(#[case] version: ClarityVersion, #[case] epoch: S .unwrap(); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_names_tokens_contracts_bad(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let broken_public = " (define-public (broken-cross-contract (name-hash (buff 20)) (name-price uint)) @@ -557,7 +551,7 @@ fn test_bad_map_usage() { }); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn test_same_function_name(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let ca_id = QualifiedContractIdentifier::local("contract-a").unwrap(); let cb_id = QualifiedContractIdentifier::local("contract-b").unwrap(); @@ -1772,7 +1766,7 @@ fn call_versioned( .map_err(|e| e.to_string()) } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); @@ -1787,7 +1781,7 @@ fn clarity_trait_experiments_impl(#[case] version: ClarityVersion, #[case] epoch }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let mut marf = MemoryBackingStore::new(); let mut db = marf.as_analysis_db(); @@ -1802,7 +1796,7 @@ fn clarity_trait_experiments_use(#[case] version: ClarityVersion, #[case] epoch: }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_empty_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1818,7 +1812,7 @@ fn clarity_trait_experiments_empty_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_duplicate_trait( #[case] version: 
ClarityVersion, #[case] epoch: StacksEpochId, @@ -1837,7 +1831,7 @@ fn clarity_trait_experiments_duplicate_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_undefined( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1854,7 +1848,7 @@ fn clarity_trait_experiments_use_undefined( )); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_circular( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1872,7 +1866,7 @@ fn clarity_trait_experiments_circular( assert!(err.starts_with("ASTError(ParseError { err: CircularReference([\"circular\"])")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_no_response( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1887,7 +1881,7 @@ fn clarity_trait_experiments_no_response( assert!(err.starts_with("DefineTraitBadSignature")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_out_of_order( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1903,7 +1897,7 @@ fn clarity_trait_experiments_out_of_order( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1921,7 +1915,7 @@ fn clarity_trait_experiments_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_both( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1942,7 +1936,7 @@ fn clarity_trait_experiments_impl_double_trait_both( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1965,7 +1959,7 @@ fn 
clarity_trait_experiments_impl_double_trait_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_double_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -1986,7 +1980,7 @@ fn clarity_trait_experiments_impl_double_trait_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2010,7 +2004,7 @@ fn clarity_trait_experiments_use_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_partial_double_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2034,7 +2028,7 @@ fn clarity_trait_experiments_use_partial_double_trait_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_partial_double_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2056,7 +2050,7 @@ fn clarity_trait_experiments_use_partial_double_trait_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_identical_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2074,7 +2068,7 @@ fn clarity_trait_experiments_identical_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_identical_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2095,7 +2089,7 @@ fn clarity_trait_experiments_impl_identical_double_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_selfret_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2110,7 +2104,7 @@ fn clarity_trait_experiments_selfret_trait( assert!(err.starts_with("ASTError(ParseError { err: 
CircularReference([\"self-return\"])")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_math_trait_transitive_alias( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2130,7 +2124,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_alias( assert!(err.starts_with("TraitReferenceUnknown(\"math-alias\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_math_trait_transitive_name( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2153,7 +2147,7 @@ fn clarity_trait_experiments_use_math_trait_transitive_name( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_original_and_define_a_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2175,7 +2169,7 @@ fn clarity_trait_experiments_use_original_and_define_a_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_redefined_and_define_a_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2197,7 +2191,7 @@ fn clarity_trait_experiments_use_redefined_and_define_a_trait( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_a_trait_transitive_original( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2216,7 +2210,7 @@ fn clarity_trait_experiments_use_a_trait_transitive_original( assert!(err.starts_with("TraitMethodUnknown(\"a\", \"do-it\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_use_a_trait_transitive_redefined( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2236,7 +2230,7 @@ fn clarity_trait_experiments_use_a_trait_transitive_redefined( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn 
clarity_trait_experiments_nested_traits( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2258,7 +2252,7 @@ fn clarity_trait_experiments_nested_traits( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2291,7 +2285,7 @@ fn clarity_trait_experiments_call_nested_trait_1( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2317,7 +2311,7 @@ fn clarity_trait_experiments_call_nested_trait_2( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_3_ok( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2343,7 +2337,7 @@ fn clarity_trait_experiments_call_nested_trait_3_ok( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_3_err( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2366,7 +2360,7 @@ fn clarity_trait_experiments_call_nested_trait_3_err( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_nested_trait_4( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2399,7 +2393,7 @@ fn clarity_trait_experiments_call_nested_trait_4( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_impl_math_trait_incomplete( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2417,7 +2411,7 @@ fn clarity_trait_experiments_impl_math_trait_incomplete( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_literal( #[case] 
version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2437,7 +2431,7 @@ fn clarity_trait_experiments_trait_literal( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_pass_let_rename_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2456,7 +2450,7 @@ fn clarity_trait_experiments_pass_let_rename_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_literal_incomplete( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2475,7 +2469,7 @@ fn clarity_trait_experiments_trait_literal_incomplete( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_let_rename_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2497,7 +2491,7 @@ fn clarity_trait_experiments_call_let_rename_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_data_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2516,7 +2510,7 @@ fn clarity_trait_experiments_trait_data_1( assert!(err.starts_with("ASTError(ParseError { err: TraitReferenceNotAllowed")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_data_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2535,7 +2529,7 @@ fn clarity_trait_experiments_trait_data_2( assert!(err.starts_with("ASTError(ParseError { err: TraitReferenceNotAllowed")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_trait_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2551,14 +2545,14 @@ fn clarity_trait_experiments_upcast_trait_1( load_versioned(db, "upcast-trait-1", version, epoch) }) .unwrap_err(); - if 
epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2577,7 +2571,7 @@ fn clarity_trait_experiments_upcast_trait_2( assert!(err.starts_with("TypeError(TupleType(TupleTypeSignature { \"val\": principal,}), TupleType(TupleTypeSignature { \"val\": ,}))")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_trait_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2593,14 +2587,14 @@ fn clarity_trait_experiments_upcast_trait_3( load_versioned(db, "upcast-trait-3", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_return_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2619,7 +2613,7 @@ fn clarity_trait_experiments_return_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_upcast_renamed( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2635,14 +2629,14 @@ fn clarity_trait_experiments_upcast_renamed( load_versioned(db, "upcast-renamed", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(PrincipalType, TraitReferenceType")); } else { assert!(err.starts_with("TypeError(PrincipalType, CallableType")); } } 
-#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2665,7 +2659,7 @@ fn clarity_trait_experiments_constant_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_to_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2681,7 +2675,7 @@ fn clarity_trait_experiments_constant_to_trait( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2691,7 +2685,7 @@ fn clarity_trait_experiments_constant_to_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_constant_to_constant_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2708,7 +2702,7 @@ fn clarity_trait_experiments_constant_to_constant_call( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2718,7 +2712,7 @@ fn clarity_trait_experiments_constant_to_constant_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_literal_1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2735,7 +2729,7 @@ fn clarity_trait_experiments_downcast_literal_1( load_versioned(db, "downcast-literal-1", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { println!("err: {}", err); 
assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { @@ -2743,7 +2737,7 @@ fn clarity_trait_experiments_downcast_literal_1( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_literal_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2767,7 +2761,7 @@ fn clarity_trait_experiments_downcast_literal_2( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_literal_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2786,7 +2780,7 @@ fn clarity_trait_experiments_downcast_literal_3( assert!(err.starts_with("TraitReferenceUnknown(\"p\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2803,14 +2797,14 @@ fn clarity_trait_experiments_downcast_trait_2( load_versioned(db, "downcast-trait-2", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] 
+#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_3( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2826,14 +2820,14 @@ fn clarity_trait_experiments_downcast_trait_3( load_versioned(db, "downcast-trait-3", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_4( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2849,14 +2843,14 @@ fn clarity_trait_experiments_downcast_trait_4( load_versioned(db, "downcast-trait-4", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] 
+#[apply(test_clarity_versions)] fn clarity_trait_experiments_downcast_trait_5( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2872,14 +2866,14 @@ fn clarity_trait_experiments_downcast_trait_5( load_versioned(db, "downcast-trait-5", version, epoch) }) .unwrap_err(); - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } }), PrincipalType)")); } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier { name: ClarityName(\"math\"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S1G2081040G2081040G2081040G208105NK8PE5), name: ContractName(\"math-trait\") } })), PrincipalType)")); } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_identical_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2895,7 +2889,7 @@ fn clarity_trait_experiments_identical_trait_cast( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2905,7 +2899,7 @@ fn clarity_trait_experiments_identical_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2921,7 +2915,7 @@ fn clarity_trait_experiments_trait_cast( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= 
StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } Err(err) if version == ClarityVersion::Clarity1 => { @@ -2931,7 +2925,7 @@ fn clarity_trait_experiments_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_cast_incompatible( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2949,7 +2943,7 @@ fn clarity_trait_experiments_trait_cast_incompatible( .unwrap_err(); match version { ClarityVersion::Clarity1 => { - if epoch == StacksEpochId::Epoch2_05 { + if epoch <= StacksEpochId::Epoch2_05 { assert!(err.starts_with("TypeError(TraitReferenceType(TraitIdentifier")) } else { assert!(err.starts_with("TypeError(CallableType(Trait(TraitIdentifier")) @@ -2959,7 +2953,7 @@ fn clarity_trait_experiments_trait_cast_incompatible( } } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_renamed_trait_cast( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2978,7 +2972,7 @@ fn clarity_trait_experiments_renamed_trait_cast( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_use_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -2997,7 +2991,7 @@ fn clarity_trait_experiments_readonly_use_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_pass_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3017,7 +3011,7 @@ fn clarity_trait_experiments_readonly_pass_trait( } // TODO: This should be allowed -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3036,7 +3030,7 @@ fn clarity_trait_experiments_readonly_call_trait( } // TODO: This should be allowed 
-#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_static_call( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3056,7 +3050,7 @@ fn clarity_trait_experiments_readonly_static_call( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_readonly_static_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3075,7 +3069,7 @@ fn clarity_trait_experiments_readonly_static_call_trait( assert!(err.starts_with("WriteAttemptedInReadOnly")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3103,7 +3097,7 @@ fn clarity_trait_experiments_dyn_call_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_trait_partial( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3130,7 +3124,7 @@ fn clarity_trait_experiments_dyn_call_trait_partial( assert!(err.starts_with("BadTraitImplementation(\"math\", \"sub\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_dyn_call_not_implemented( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3157,7 +3151,7 @@ fn clarity_trait_experiments_dyn_call_not_implemented( assert!(err.starts_with("BadTraitImplementation(\"math\", \"add\")")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_use_principal( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3176,7 +3170,7 @@ fn clarity_trait_experiments_call_use_principal( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_return_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ 
-3204,7 +3198,7 @@ fn clarity_trait_experiments_call_return_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_full_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3235,7 +3229,7 @@ fn clarity_trait_experiments_call_full_double_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_call_partial_double_trait( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3266,7 +3260,7 @@ fn clarity_trait_experiments_call_partial_double_trait( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_trait_recursion( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3295,7 +3289,7 @@ fn clarity_trait_experiments_trait_recursion( } // Additional tests using this framework -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_principals_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3318,7 +3312,7 @@ fn clarity_trait_experiments_principals_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_traits_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3338,7 +3332,7 @@ fn clarity_trait_experiments_traits_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_mixed_list_to_traits_list( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3354,7 +3348,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( }); match result { Ok(_) if version == ClarityVersion::Clarity2 => (), - Err(err) if epoch == StacksEpochId::Epoch2_05 => { + Err(err) if epoch <= StacksEpochId::Epoch2_05 => { assert!(err.starts_with("TypeError(TraitReferenceType")) } 
Err(err) if version == ClarityVersion::Clarity1 => { @@ -3364,7 +3358,7 @@ fn clarity_trait_experiments_mixed_list_to_traits_list( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method1_v1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3399,7 +3393,7 @@ fn clarity_trait_experiments_double_trait_method1_v1( assert!(err.starts_with("TypeError(BoolType, UIntType)")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method2_v1( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3435,7 +3429,7 @@ fn clarity_trait_experiments_double_trait_method2_v1( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method1_v1_v2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3470,7 +3464,7 @@ fn clarity_trait_experiments_double_trait_method1_v1_v2( assert!(err.starts_with("TypeError(BoolType, UIntType)")); } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_double_trait_method2_v1_v2( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, @@ -3506,7 +3500,7 @@ fn clarity_trait_experiments_double_trait_method2_v1_v2( }; } -#[apply(test_clarity_versions_contracts)] +#[apply(test_clarity_versions)] fn clarity_trait_experiments_cross_epochs( #[case] version: ClarityVersion, #[case] epoch: StacksEpochId, diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs index 66730addce..ba01e9e72a 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/mod.rs @@ -47,20 +47,11 @@ use crate::vm::types::{SequenceSubtype::*, StringSubtype::*}; use crate::vm::ClarityVersion; use crate::vm::{execute_v2, ClarityName}; +use 
crate::vm::tests::test_clarity_versions; + mod assets; pub mod contracts; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_type_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - /// Backwards-compatibility shim for type_checker tests. Runs at latest Clarity version. pub fn mem_type_check(exp: &str) -> CheckResult<(Option, ContractAnalysis)> { mem_run_analysis( @@ -340,7 +331,7 @@ fn test_get_burn_block_info() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_define_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-trait trait-1 ((get-1 (uint) (response uint uint))))", @@ -384,7 +375,7 @@ fn test_define_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpoch } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_use_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = [ "(use-trait trait-1 ((get-1 (uint) (response uint uint))))", @@ -406,7 +397,7 @@ fn test_use_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_impl_trait(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = ["(impl-trait trait-1)", "(impl-trait)"]; let bad_expected = [ @@ -530,7 +521,7 @@ fn test_tx_sponsor() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_destructuring_opts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(unwrap! 
(some 1) 2)", @@ -744,7 +735,7 @@ fn test_at_block() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_trait_reference_unknown(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad = [( "(+ 1 )", @@ -1148,7 +1139,7 @@ fn test_element_at() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_eqs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(is-eq (list 1 2 3 4 5) (list 1 2 3 4 5 6 7))", @@ -2193,7 +2184,7 @@ fn test_string_to_ints() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_response_inference(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-private (foo (x int)) (err x)) @@ -2323,7 +2314,7 @@ fn test_factorial() { mem_type_check(contract).unwrap(); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_options(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let contract = " (define-private (foo (id (optional int))) @@ -3542,7 +3533,7 @@ fn test_let_bind_trait() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_trait_same_contract(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = ["(define-trait trait-foo ((foo () (response uint uint)))) (define-public (call-foo (f )) @@ -3588,7 +3579,7 @@ fn test_tuple_arg() { } } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_list_arg(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let good = [ "(define-private (foo (l (list 3 int))) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 453aaa3f40..2f46fdb249 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -1160,7 +1160,16 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { let value = arg.match_atom_value() .ok_or_else(|| 
InterpreterError::InterpreterError(format!("Passed non-value expression to exec_tx on {}!", tx_name)))?; - Ok(value.clone()) + // sanitize contract-call inputs in epochs >= 2.4 + // testing todo: ensure sanitize_value() preserves trait callability! + let expected_type = TypeSignature::type_of(value); + let (sanitized_value, _) = Value::sanitize_value( + self.epoch(), + &expected_type, + value.clone(), + ).ok_or_else(|| CheckErrors::TypeValueError(expected_type, value.clone()))?; + + Ok(sanitized_value) }) .collect(); diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 994353eb7e..5b8270ff21 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -326,12 +326,15 @@ pub enum CostErrors { fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result { let cost_voting_contract = boot_code_id("cost-voting", mainnet); + let clarity_epoch = clarity_db.get_clarity_epoch_version(); let last_processed_at = match clarity_db.get_value( "vm-costs::last-processed-at-height", &TypeSignature::UIntType, + &clarity_epoch, ) { - Some(v) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), - None => return Ok(CostStateSummary::empty()), + Ok(Some(v)) => u32::try_from(v.value.expect_u128()).expect("Block height overflowed u32"), + Ok(None) => return Ok(CostStateSummary::empty()), + Err(e) => return Err(CostErrors::CostComputationFailed(e.to_string())), }; let metadata_result = clarity_db @@ -355,11 +358,14 @@ fn store_state_summary( ) -> Result<()> { let block_height = clarity_db.get_current_block_height(); let cost_voting_contract = boot_code_id("cost-voting", mainnet); - - clarity_db.put( - "vm-costs::last-processed-at-height", - &Value::UInt(block_height as u128), - ); + let epoch = clarity_db.get_clarity_epoch_version(); + clarity_db + .put_value( + "vm-costs::last-processed-at-height", + Value::UInt(block_height as u128), + &epoch, + ) + .map_err(|_e| CostErrors::CostContractLoadFailure)?; 
let serialized_summary = serde_json::to_string(&SerializedCostStateSummary::from(to_store.clone())) .expect("BUG: failure to serialize cost state summary struct"); @@ -387,14 +393,24 @@ fn load_cost_functions( clarity_db: &mut ClarityDatabase, apply_updates: bool, ) -> Result { + let clarity_epoch = clarity_db.get_clarity_epoch_version(); let last_processed_count = clarity_db - .get_value("vm-costs::last_processed_count", &TypeSignature::UIntType) + .get_value( + "vm-costs::last_processed_count", + &TypeSignature::UIntType, + &clarity_epoch, + ) + .map_err(|_e| CostErrors::CostContractLoadFailure)? .map(|result| result.value) .unwrap_or(Value::UInt(0)) .expect_u128(); let cost_voting_contract = boot_code_id("cost-voting", mainnet); let confirmed_proposals_count = clarity_db - .lookup_variable_unknown_descriptor(&cost_voting_contract, "confirmed-proposal-count") + .lookup_variable_unknown_descriptor( + &cost_voting_contract, + "confirmed-proposal-count", + &clarity_epoch, + ) .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? 
.expect_u128(); debug!("Check cost voting contract"; @@ -421,6 +437,7 @@ fn load_cost_functions( )]) .expect("BUG: failed to construct simple tuple"), ), + &clarity_epoch, ) .expect("BUG: Failed querying confirmed-proposals") .expect_optional() @@ -614,10 +631,13 @@ fn load_cost_functions( } if confirmed_proposals_count > last_processed_count { store_state_summary(mainnet, clarity_db, &state_summary)?; - clarity_db.put( - "vm-costs::last_processed_count", - &Value::UInt(confirmed_proposals_count), - ); + clarity_db + .put_value( + "vm-costs::last_processed_count", + Value::UInt(confirmed_proposals_count), + &clarity_epoch, + ) + .map_err(|_e| CostErrors::CostContractLoadFailure)?; } Ok(state_summary) @@ -700,7 +720,10 @@ impl LimitedCostTracker { } StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), - StacksEpochId::Epoch21 => COSTS_3_NAME.to_string(), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => COSTS_3_NAME.to_string(), } } } diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 9facb6c3d9..95ff31eade 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -51,11 +51,11 @@ use crate::vm::errors::{ RuntimeErrorType, }; use crate::vm::representations::ClarityName; -use crate::vm::types::byte_len_of_serialization; use crate::vm::types::{ - serialization::NONE_SERIALIZATION_LEN, OptionalData, PrincipalData, - QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, - TupleTypeSignature, TypeSignature, Value, NONE, + byte_len_of_serialization, + serialization::{SerializationError, NONE_SERIALIZATION_LEN}, + OptionalData, PrincipalData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, + TupleData, TupleTypeSignature, TypeSignature, Value, NONE, }; pub const STORE_CONTRACT_SRC_INTERFACE: bool = true; @@ -107,6 +107,8 @@ pub trait 
HeadersDB { pub trait BurnStateDB { fn get_v1_unlock_height(&self) -> u32; + fn get_v2_unlock_height(&self) -> u32; + fn get_pox_3_activation_height(&self) -> u32; /// Returns the *burnchain block height* for the `sortition_id` is associated with. fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option; @@ -191,6 +193,14 @@ impl BurnStateDB for &dyn BurnStateDB { (*self).get_v1_unlock_height() } + fn get_v2_unlock_height(&self) -> u32 { + (*self).get_v2_unlock_height() + } + + fn get_pox_3_activation_height(&self) -> u32 { + (*self).get_pox_3_activation_height() + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { (*self).get_burn_block_height(sortition_id) } @@ -358,7 +368,15 @@ impl BurnStateDB for NullBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { @@ -457,8 +475,50 @@ impl<'a> ClarityDatabase<'a> { self.store.get::(key) } - pub fn get_value(&mut self, key: &str, expected: &TypeSignature) -> Option { - self.store.get_value(key, expected) + pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { + self.put_value_with_size(key, value, epoch)?; + Ok(()) + } + + pub fn put_value_with_size( + &mut self, + key: &str, + value: Value, + epoch: &StacksEpochId, + ) -> Result { + let sanitize = epoch.value_sanitizing(); + let mut pre_sanitized_size = None; + + let serialized = if sanitize { + let value_size = value.serialized_size() as u64; + let (sanitized_value, did_sanitize) = + Value::sanitize_value(epoch, &TypeSignature::type_of(&value), value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + // if data needed to be sanitized *charge* for the unsanitized cost + if did_sanitize { + pre_sanitized_size = Some(value_size); + } + sanitized_value.serialize_to_vec() + } else { + 
value.serialize_to_vec() + }; + + let size = serialized.len() as u64; + let hex_serialized = to_hex(serialized.as_slice()); + self.store.put(&key, &hex_serialized); + + Ok(pre_sanitized_size.unwrap_or(size)) + } + + pub fn get_value( + &mut self, + key: &str, + expected: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result> { + self.store + .get_value(key, expected, epoch) + .map_err(|e| InterpreterError::DBError(e.to_string()).into()) } pub fn get_with_proof(&mut self, key: &str) -> Option<(T, Vec)> @@ -680,16 +740,21 @@ impl<'a> ClarityDatabase<'a> { self.get_value( ClarityDatabase::ustx_liquid_supply_key(), &TypeSignature::UIntType, + &StacksEpochId::latest(), ) + .expect("FATAL: failed to load ustx_liquid_supply Clarity key") .map(|v| v.value.expect_u128()) .unwrap_or(0) } fn set_ustx_liquid_supply(&mut self, set_to: u128) { - self.put( + self.put_value( ClarityDatabase::ustx_liquid_supply_key(), - &Value::UInt(set_to), + Value::UInt(set_to), + // okay to pin epoch, because ustx_liquid_supply does not need to sanitize + &StacksEpochId::Epoch21, ) + .expect("FATAL: Failed to store STX liquid supply"); } pub fn increment_ustx_liquid_supply(&mut self, incr_by: u128) -> Result<()> { @@ -746,6 +811,21 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_v1_unlock_height() } + /// Return the height for PoX 3 activation from the burn state db + pub fn get_pox_3_activation_height(&self) -> u32 { + self.burn_state_db.get_pox_3_activation_height() + } + + /// Return the height for PoX v2 -> v3 auto unlocks + /// from the burn state db + pub fn get_v2_unlock_height(&mut self) -> u32 { + if self.get_clarity_epoch_version() >= StacksEpochId::Epoch22 { + self.burn_state_db.get_v2_unlock_height() + } else { + u32::MAX + } + } + /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! 
/// This is the burnchain block height of the parent of the Stacks block at the current Stacks @@ -1072,8 +1152,14 @@ impl<'a> ClarityDatabase<'a> { value: Value, ) -> Result { let descriptor = self.load_variable(contract_identifier, variable_name)?; - self.set_variable(contract_identifier, variable_name, value, &descriptor) - .map(|data| data.value) + self.set_variable( + contract_identifier, + variable_name, + value, + &descriptor, + &StacksEpochId::latest(), + ) + .map(|data| data.value) } pub fn set_variable( @@ -1082,6 +1168,7 @@ impl<'a> ClarityDatabase<'a> { variable_name: &str, value: Value, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { if !variable_descriptor .value_type @@ -1098,7 +1185,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let size = self.put_with_size(&key, &value); + let size = self.put_value_with_size(&key, value, epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1110,9 +1197,10 @@ impl<'a> ClarityDatabase<'a> { &mut self, contract_identifier: &QualifiedContractIdentifier, variable_name: &str, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_variable(contract_identifier, variable_name)?; - self.lookup_variable(contract_identifier, variable_name, &descriptor) + self.lookup_variable(contract_identifier, variable_name, &descriptor, epoch) } pub fn lookup_variable( @@ -1120,6 +1208,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, variable_name: &str, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { let key = ClarityDatabase::make_key_for_trip( contract_identifier, @@ -1127,7 +1216,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type); + let result = self.get_value(&key, &variable_descriptor.value_type, epoch)?; match result { None => Ok(Value::none()), @@ -1142,6 +1231,7 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: 
&QualifiedContractIdentifier, variable_name: &str, variable_descriptor: &DataVariableMetadata, + epoch: &StacksEpochId, ) -> Result { let key = ClarityDatabase::make_key_for_trip( contract_identifier, @@ -1149,7 +1239,7 @@ impl<'a> ClarityDatabase<'a> { variable_name, ); - let result = self.get_value(&key, &variable_descriptor.value_type); + let result = self.get_value(&key, &variable_descriptor.value_type, epoch)?; match result { None => Ok(ValueResult { @@ -1200,7 +1290,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, - &key_value.serialize(), + &key_value.serialize_to_hex(), ) } @@ -1222,9 +1312,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier: &QualifiedContractIdentifier, map_name: &str, key_value: &Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.fetch_entry(contract_identifier, map_name, key_value, &descriptor) + self.fetch_entry(contract_identifier, map_name, key_value, &descriptor, epoch) } /// Returns a Clarity optional type wrapping a found or not found result @@ -1234,6 +1325,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1250,7 +1342,7 @@ impl<'a> ClarityDatabase<'a> { ClarityDatabase::make_key_for_data_map_entry(contract_identifier, map_name, key_value); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type); + let result = self.get_value(&key, &stored_type, epoch)?; match result { None => Ok(Value::none()), @@ -1264,6 +1356,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1276,7 +1369,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = 
key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key = ClarityDatabase::make_key_for_data_map_entry_serialized( contract_identifier, map_name, @@ -1284,7 +1377,7 @@ impl<'a> ClarityDatabase<'a> { ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - let result = self.get_value(&key, &stored_type); + let result = self.get_value(&key, &stored_type, epoch)?; match result { None => Ok(ValueResult { @@ -1310,6 +1403,7 @@ impl<'a> ClarityDatabase<'a> { key: Value, value: Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { self.inner_set_entry( contract_identifier, @@ -1318,6 +1412,7 @@ impl<'a> ClarityDatabase<'a> { value, false, map_descriptor, + epoch, ) } @@ -1327,10 +1422,18 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key: Value, value: Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.set_entry(contract_identifier, map_name, key, value, &descriptor) - .map(|data| data.value) + self.set_entry( + contract_identifier, + map_name, + key, + value, + &descriptor, + epoch, + ) + .map(|data| data.value) } pub fn insert_entry_unknown_descriptor( @@ -1339,10 +1442,18 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key: Value, value: Value, + epoch: &StacksEpochId, ) -> Result { let descriptor = self.load_map(contract_identifier, map_name)?; - self.insert_entry(contract_identifier, map_name, key, value, &descriptor) - .map(|data| data.value) + self.insert_entry( + contract_identifier, + map_name, + key, + value, + &descriptor, + epoch, + ) + .map(|data| data.value) } pub fn insert_entry( @@ -1352,6 +1463,7 @@ impl<'a> ClarityDatabase<'a> { key: Value, value: Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { self.inner_set_entry( contract_identifier, @@ -1360,11 +1472,17 @@ impl<'a> ClarityDatabase<'a> { value, true, map_descriptor, + epoch, ) } - fn data_map_entry_exists(&mut self, 
key: &str, expected_value: &TypeSignature) -> Result { - match self.get_value(key, expected_value) { + fn data_map_entry_exists( + &mut self, + key: &str, + expected_value: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result { + match self.get_value(key, expected_value, epoch)? { None => Ok(false), Some(value) => Ok(value.value != Value::none()), } @@ -1378,6 +1496,7 @@ impl<'a> ClarityDatabase<'a> { value: Value, return_if_exists: bool, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1396,7 +1515,7 @@ impl<'a> ClarityDatabase<'a> { ); } - let key_serialized = key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1406,7 +1525,7 @@ impl<'a> ClarityDatabase<'a> { ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - if return_if_exists && self.data_map_entry_exists(&key, &stored_type)? { + if return_if_exists && self.data_map_entry_exists(&key, &stored_type, epoch)? 
{ return Ok(ValueResult { value: Value::Bool(false), serialized_byte_len: key_serialized_byte_len, @@ -1414,7 +1533,7 @@ impl<'a> ClarityDatabase<'a> { } let placed_value = Value::some(value)?; - let placed_size = self.put_with_size(&key, &placed_value); + let placed_size = self.put_value_with_size(&key, placed_value, epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1430,6 +1549,7 @@ impl<'a> ClarityDatabase<'a> { map_name: &str, key_value: &Value, map_descriptor: &DataMapMetadata, + epoch: &StacksEpochId, ) -> Result { if !map_descriptor .key_type @@ -1442,7 +1562,7 @@ impl<'a> ClarityDatabase<'a> { .into()); } - let key_serialized = key_value.serialize(); + let key_serialized = key_value.serialize_to_hex(); let key_serialized_byte_len = byte_len_of_serialization(&key_serialized); let key = ClarityDatabase::make_key_for_quad( contract_identifier, @@ -1451,14 +1571,14 @@ impl<'a> ClarityDatabase<'a> { &key_serialized, ); let stored_type = TypeSignature::new_option(map_descriptor.value_type.clone())?; - if !self.data_map_entry_exists(&key, &stored_type)? { + if !self.data_map_entry_exists(&key, &stored_type, epoch)? 
{ return Ok(ValueResult { value: Value::Bool(false), serialized_byte_len: key_serialized_byte_len, }); } - self.put(&key, &(Value::none())); + self.put_value(&key, Value::none(), epoch)?; Ok(ValueResult { value: Value::Bool(true), @@ -1662,12 +1782,17 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); - let value: Option = self.get(&key); + let epoch = self.get_clarity_epoch_version(); + let value: Option = self.get_value( + &key, + &TypeSignature::new_option(TypeSignature::PrincipalType).unwrap(), + &epoch, + )?; let owner = match value { - Some(owner) => owner.expect_optional(), + Some(owner) => owner.value.expect_optional(), None => return Err(RuntimeErrorType::NoSuchToken.into()), }; @@ -1695,6 +1820,7 @@ impl<'a> ClarityDatabase<'a> { asset: &Value, principal: &PrincipalData, key_type: &TypeSignature, + epoch: &StacksEpochId, ) -> Result<()> { if !key_type.admits(&self.get_clarity_epoch_version(), asset)? { return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); @@ -1704,11 +1830,11 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); let value = Value::some(Value::Principal(principal.clone()))?; - self.put(&key, &value); + self.put_value(&key, value, epoch)?; Ok(()) } @@ -1719,6 +1845,7 @@ impl<'a> ClarityDatabase<'a> { asset_name: &str, asset: &Value, key_type: &TypeSignature, + epoch: &StacksEpochId, ) -> Result<()> { if !key_type.admits(&self.get_clarity_epoch_version(), asset)? 
{ return Err(CheckErrors::TypeValueError(key_type.clone(), (*asset).clone()).into()); @@ -1728,10 +1855,10 @@ impl<'a> ClarityDatabase<'a> { contract_identifier, StoreType::NonFungibleToken, asset_name, - &asset.serialize(), + &asset.serialize_to_hex(), ); - self.put(&key, &(Value::none())); + self.put_value(&key, Value::none(), epoch)?; Ok(()) } } @@ -1771,8 +1898,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } @@ -1790,8 +1917,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } @@ -1827,15 +1954,6 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_burn_block_height(sortition_id) } - pub fn get_burn_header_hash( - &self, - height: u32, - sortition_id: &SortitionId, - ) -> Option { - self.burn_state_db - .get_burn_header_hash(height, sortition_id) - } - /// This function obtains the stacks epoch version, which is 
based on the burn block height. /// Valid epochs include stacks 1.0, 2.0, 2.05, and so on. pub fn get_stacks_epoch(&self, height: u32) -> Option { diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 614cb33396..af49cd2f8e 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -23,9 +23,13 @@ use super::clarity_store::SpecialCaseHandler; use super::{ClarityBackingStore, ClarityDeserializable}; use crate::types::chainstate::StacksBlockId; use crate::vm::database::clarity_store::make_contract_hash_key; -use crate::vm::errors::InterpreterResult as Result; -use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; -use crate::vm::Value; +use crate::vm::errors::InterpreterResult; +use crate::vm::types::serialization::SerializationError; +use crate::vm::types::{ + QualifiedContractIdentifier, SequenceData, SequenceSubtype, TupleData, TypeSignature, +}; +use crate::vm::{StacksEpoch, Value}; +use stacks_common::types::StacksEpochId; #[cfg(rollback_value_check)] type RollbackValueCheck = String; @@ -100,6 +104,7 @@ where /// Result structure for fetched values from the /// underlying store. 
+#[derive(Debug)] pub struct ValueResult { pub value: Value, pub serialized_byte_len: u64, @@ -322,7 +327,7 @@ impl<'a> RollbackWrapper<'a> { &mut self, bhh: StacksBlockId, query_pending_data: bool, - ) -> Result { + ) -> InterpreterResult { self.store.set_block_hash(bhh).and_then(|x| { // use and_then so that query_pending_data is only set once set_block_hash succeeds // this doesn't matter in practice, because a set_block_hash failure always aborts @@ -364,31 +369,43 @@ impl<'a> RollbackWrapper<'a> { lookup_result.or_else(|| self.store.get(key).map(|x| T::deserialize(&x))) } + pub fn deserialize_value( + value_hex: &str, + expected: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result { + let serialized_byte_len = value_hex.len() as u64 / 2; + let sanitize = epoch.value_sanitizing(); + let value = Value::try_deserialize_hex(value_hex, expected, sanitize)?; + + Ok(ValueResult { + value, + serialized_byte_len, + }) + } + /// Get a Clarity value from the underlying Clarity KV store. /// Returns Some if found, with the Clarity Value and the serialized byte length of the value. 
- pub fn get_value(&mut self, key: &str, expected: &TypeSignature) -> Option { + pub fn get_value( + &mut self, + key: &str, + expected: &TypeSignature, + epoch: &StacksEpochId, + ) -> Result, SerializationError> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); - let lookup_result = if self.query_pending_data { - self.lookup_map - .get(key) - .and_then(|x| x.last()) - .map(|x| ValueResult { - value: Value::deserialize(x, expected), - serialized_byte_len: x.len() as u64 / 2, - }) - } else { - None - }; + if self.query_pending_data { + if let Some(x) = self.lookup_map.get(key).and_then(|x| x.last()) { + return Ok(Some(Self::deserialize_value(x, expected, epoch)?)); + } + } - lookup_result.or_else(|| { - self.store.get(key).map(|x| ValueResult { - value: Value::deserialize(&x, expected), - serialized_byte_len: x.len() as u64 / 2, - }) - }) + match self.store.get(key) { + Some(x) => Ok(Some(Self::deserialize_value(&x, expected, epoch)?)), + None => Ok(None), + } } /// This is the height we are currently constructing. It comes from the MARF. 
@@ -438,7 +455,7 @@ impl<'a> RollbackWrapper<'a> { &mut self, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { + ) -> InterpreterResult> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); @@ -467,7 +484,7 @@ impl<'a> RollbackWrapper<'a> { at_height: u32, contract: &QualifiedContractIdentifier, key: &str, - ) -> Result> { + ) -> InterpreterResult> { self.stack .last() .expect("ERROR: Clarity VM attempted GET on non-nested context."); diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 5fa6a46f3c..8b80504b3d 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -141,6 +141,11 @@ pub enum STXBalance { amount_locked: u128, unlock_height: u64, }, + LockedPoxThree { + amount_unlocked: u128, + amount_locked: u128, + unlock_height: u64, + }, } /// Lifetime-limited handle to an uncommitted balance structure. @@ -202,6 +207,24 @@ impl ClaritySerializable for STXBalance { .write_all(&unlock_height.to_be_bytes()) .expect("STXBalance serialization: failed writing unlock_height."); } + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height, + } => { + buffer + .write_all(&[STXBalance::pox_3_version]) + .expect("STXBalance serialization: failed to write PoX version byte"); + buffer + .write_all(&amount_unlocked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_unlocked."); + buffer + .write_all(&amount_locked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_locked."); + buffer + .write_all(&unlock_height.to_be_bytes()) + .expect("STXBalance serialization: failed writing unlock_height."); + } } to_hex(buffer.as_slice()) } @@ -238,9 +261,9 @@ impl ClarityDeserializable for STXBalance { unlock_height, } } - } else if bytes.len() == STXBalance::v2_size { + } else if bytes.len() == STXBalance::v2_and_v3_size { let version = &bytes[0]; - if version != 
&STXBalance::pox_2_version { + if version != &STXBalance::pox_2_version && version != &STXBalance::pox_3_version { panic!( "Bad version byte in STX Balance serialization = {}", version @@ -266,12 +289,20 @@ impl ClarityDeserializable for STXBalance { STXBalance::Unlocked { amount: amount_unlocked, } - } else { + } else if version == &STXBalance::pox_2_version { STXBalance::LockedPoxTwo { amount_unlocked, amount_locked, unlock_height, } + } else if version == &STXBalance::pox_3_version { + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height, + } + } else { + unreachable!("Version is checked for pox_3 or pox_2 version compliance above"); } } else { panic!("Bad STX Balance serialization size = {}", bytes.len()); @@ -324,32 +355,45 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { Ok(()) } - pub fn get_available_balance(&self) -> u128 { + pub fn get_available_balance(&mut self) -> u128 { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .get_available_balance_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + self.balance.get_available_balance_at_burn_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } - pub fn canonical_balance_repr(&self) -> STXBalance { + pub fn canonical_balance_repr(&mut self) -> STXBalance { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height) + .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height) .0 } - pub fn has_locked_tokens(&self) -> bool { + pub fn has_locked_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .has_locked_tokens_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + 
self.balance.has_locked_tokens_at_burn_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } - pub fn has_unlockable_tokens(&self) -> bool { + pub fn has_unlockable_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); - self.balance - .has_unlockable_tokens_at_burn_block(self.burn_block_height, v1_unlock_height) + let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + self.balance.has_unlockable_tokens_at_burn_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) } - pub fn can_transfer(&self, amount: u128) -> bool { + pub fn can_transfer(&mut self, amount: u128) -> bool { self.get_available_balance() >= amount } @@ -412,13 +456,14 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { }; } + ////////////// Pox-2 ///////////////// + /// Return true iff `self` represents a snapshot that has a lock /// created by PoX v2. - pub fn is_v2_locked(&self) -> bool { + pub fn is_v2_locked(&mut self) -> bool { match self.canonical_balance_repr() { - STXBalance::Unlocked { .. } => false, - STXBalance::LockedPoxOne { .. } => false, STXBalance::LockedPoxTwo { .. } => true, + _ => false, } } @@ -525,6 +570,122 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { }; } + //////////////// Pox-3 ////////////////// + + /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxThree" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn lock_tokens_v3(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after account-token-lock"); + } + + // caller needs to have checked this + assert!(amount_to_lock > 0, "BUG: cannot lock 0 tokens"); + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + if self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account already has locked tokens"); + } + + // from `unlock_available_tokens_if_any` call above, `self.balance` should + // be canonicalized already + + let new_amount_unlocked = self + .balance + .get_total_balance() + .checked_sub(amount_to_lock) + .expect("FATAL: account locks more STX than balance possessed"); + + self.balance = STXBalance::LockedPoxThree { + amount_unlocked: new_amount_unlocked, + amount_locked: amount_to_lock, + unlock_height: unlock_burn_height, + }; + } + + /// Extend this account's current lock to `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxThree" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn extend_lock_v3(&mut self, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + self.balance = STXBalance::LockedPoxThree { + amount_unlocked: self.balance.amount_unlocked(), + amount_locked: self.balance.amount_locked(), + unlock_height: unlock_burn_height, + }; + } + + /// Increase the account's current lock to `new_total_locked`. + /// Panics if `self` was not locked by V3 PoX. + pub fn increase_lock_v3(&mut self, new_total_locked: u128) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if !self.is_v3_locked() { + // caller needs to have checked this + panic!("FATAL: account must be locked by pox-3"); + } + + assert!( + self.balance.amount_locked() <= new_total_locked, + "FATAL: account must lock more after `increase_lock_v3`" + ); + + let total_amount = self + .balance + .amount_unlocked() + .checked_add(self.balance.amount_locked()) + .expect("STX balance overflowed u128"); + let amount_unlocked = total_amount + .checked_sub(new_total_locked) + .expect("STX underflow: more is locked than total balance"); + + self.balance = STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked: new_total_locked, + unlock_height: self.balance.unlock_height(), + }; + } + + /// Return true 
iff `self` represents a snapshot that has a lock + /// created by PoX v3. + pub fn is_v3_locked(&mut self) -> bool { + match self.canonical_balance_repr() { + STXBalance::LockedPoxThree { .. } => true, + _ => false, + } + } + + /////////////// GENERAL ////////////////////// + /// If this snapshot is locked, then alter the lock height to be /// the next burn block (i.e., `self.burn_block_height + 1`) pub fn accelerate_unlock(&mut self) { @@ -548,15 +709,26 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked, unlock_height: new_unlock_height, }, + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + .. + } => STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + unlock_height: new_unlock_height, + }, }; } /// Unlock any tokens that are unlockable at the current /// burn block height, and return the amount newly unlocked fn unlock_available_tokens_if_any(&mut self) -> u128 { - let (new_balance, unlocked) = self - .balance - .canonical_repr_at_block(self.burn_block_height, self.db_ref.get_v1_unlock_height()); + let (new_balance, unlocked) = self.balance.canonical_repr_at_block( + self.burn_block_height, + self.db_ref.get_v1_unlock_height(), + self.db_ref.get_v2_unlock_height(), + ); self.balance = new_balance; unlocked } @@ -565,8 +737,9 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { // NOTE: do _not_ add mutation methods to this struct. Put them in STXBalanceSnapshot! impl STXBalance { pub const unlocked_and_v1_size: usize = 40; - pub const v2_size: usize = 41; + pub const v2_and_v3_size: usize = 41; pub const pox_2_version: u8 = 0; + pub const pox_3_version: u8 = 1; pub fn zero() -> STXBalance { STXBalance::Unlocked { amount: 0 } @@ -582,15 +755,16 @@ impl STXBalance { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } - | STXBalance::LockedPoxTwo { unlock_height, .. } => *unlock_height, + | STXBalance::LockedPoxTwo { unlock_height, .. 
} + | STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, } } /// This method returns the datastructure's lazy view of the unlock_height - /// *while* factoring in the PoX 2 early unlock for PoX 1. + /// *while* factoring in the PoX 2 early unlock for PoX 1 and PoX 3 early unlock for PoX 2. /// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. - pub fn effective_unlock_height(&self, v1_unlock_height: u32) -> u64 { + pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32) -> u64 { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { @@ -600,7 +774,14 @@ impl STXBalance { *unlock_height } } - STXBalance::LockedPoxTwo { unlock_height, .. } => *unlock_height, + STXBalance::LockedPoxTwo { unlock_height, .. } => { + if *unlock_height >= (v2_unlock_height as u64) { + v2_unlock_height as u64 + } else { + *unlock_height + } + } + STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, } } @@ -610,7 +791,8 @@ impl STXBalance { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { amount_locked, .. } - | STXBalance::LockedPoxTwo { amount_locked, .. } => *amount_locked, + | STXBalance::LockedPoxTwo { amount_locked, .. } + | STXBalance::LockedPoxThree { amount_locked, .. } => *amount_locked, } } @@ -626,6 +808,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. } => *amount_unlocked, } } @@ -640,6 +825,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. } => { *amount_unlocked = amount_unlocked.checked_sub(delta).expect("STX underflow"); } @@ -656,6 +844,9 @@ impl STXBalance { } | STXBalance::LockedPoxTwo { amount_unlocked, .. + } + | STXBalance::LockedPoxThree { + amount_unlocked, .. 
} => { if let Some(new_amount) = amount_unlocked.checked_add(delta) { *amount_unlocked = new_amount; @@ -667,28 +858,6 @@ impl STXBalance { } } - fn set_locked(&mut self) { - match self { - STXBalance::Unlocked { .. } => {} - STXBalance::LockedPoxOne { - unlock_height, - amount_locked, - .. - } => { - *unlock_height = 0; - *amount_locked = 0; - } - STXBalance::LockedPoxTwo { - unlock_height, - amount_locked, - .. - } => { - *unlock_height = 0; - *amount_locked = 0; - } - } - } - /// Returns a canonicalized STXBalance at a given burn_block_height /// (i.e., if burn_block_height >= unlock_height, then return struct where /// amount_unlocked = 0, unlock_height = 0), and the amount of tokens which @@ -697,8 +866,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> (STXBalance, u128) { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { ( STXBalance::Unlocked { amount: self.get_total_balance(), @@ -714,8 +888,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> u128 { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { self.get_total_balance() } else { match self { @@ -726,6 +905,9 @@ impl STXBalance { STXBalance::LockedPoxTwo { amount_unlocked, .. } => *amount_unlocked, + STXBalance::LockedPoxThree { + amount_unlocked, .. 
+ } => *amount_unlocked, } } } @@ -734,8 +916,13 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> (u128, u64) { - if self.has_unlockable_tokens_at_burn_block(burn_block_height, v1_unlock_height) { + if self.has_unlockable_tokens_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) { (0, 0) } else { match self { @@ -750,6 +937,11 @@ impl STXBalance { unlock_height, .. } => (*amount_locked, *unlock_height), + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. + } => (*amount_locked, *unlock_height), } } } @@ -767,10 +959,23 @@ impl STXBalance { amount_locked, .. } => (*amount_unlocked, *amount_locked), + STXBalance::LockedPoxThree { + amount_unlocked, + amount_locked, + .. + } => (*amount_unlocked, *amount_locked), }; unlocked.checked_add(locked).expect("STX overflow") } + pub fn was_locked_by_v1(&self) -> bool { + if let STXBalance::LockedPoxOne { .. } = self { + true + } else { + false + } + } + pub fn was_locked_by_v2(&self) -> bool { if let STXBalance::LockedPoxTwo { .. } = self { true @@ -779,10 +984,19 @@ impl STXBalance { } } + pub fn was_locked_by_v3(&self) -> bool { + if let STXBalance::LockedPoxThree { .. } = self { + true + } else { + false + } + } + pub fn has_locked_tokens_at_burn_block( &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -808,7 +1022,32 @@ impl STXBalance { amount_locked, unlock_height, .. - } => *amount_locked > 0 && *unlock_height > burn_block_height, + } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + // if unlockable due to Stacks 2.2 early unlock + if v2_unlock_height as u64 <= burn_block_height { + return false; + } + true + } + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. 
+ } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + true + } } } @@ -816,6 +1055,7 @@ impl STXBalance { &self, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -841,7 +1081,34 @@ impl STXBalance { amount_locked, unlock_height, .. - } => *amount_locked > 0 && *unlock_height <= burn_block_height, + } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + // if unlockable due to Stacks 2.2 early unlock + if v2_unlock_height as u64 <= burn_block_height { + return true; + } + false + } + STXBalance::LockedPoxThree { + amount_locked, + unlock_height, + .. + } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + false + } } } @@ -850,7 +1117,12 @@ impl STXBalance { amount: u128, burn_block_height: u64, v1_unlock_height: u32, + v2_unlock_height: u32, ) -> bool { - self.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height) >= amount + self.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ) >= amount } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 9c8a8bc37d..b6b5d51205 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -319,7 +319,7 @@ Note: This function is only available starting with Stacks 2.1.", "#, }; -const principal_destruct_API: SimpleFunctionAPI = SimpleFunctionAPI { +const PRINCPIPAL_DESTRUCT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "principal-destruct? ${1:principal-address}", signature: "(principal-destruct? 
principal-address)", @@ -355,58 +355,6 @@ Note: This function is only available starting with Stacks 2.1.", "#, }; -const PRINCIPAL_CONSTRUCT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: None, - snippet: "principal-construct? ${1:version} ${2:pub-key-hash}", - signature: "(principal-construct? (buff 1) (buff 20) [(string-ascii 40)])", - description: "A principal value represents either a set of keys, or a smart contract. -The former, called a _standard principal_, -is encoded as a `(buff 1)` *version byte*, indicating the type of account -and the type of network that this principal can spend tokens on, -and a `(buff 20)` *public key hash*, characterizing the principal's unique identity. -The latter, a _contract principal_, is encoded as a standard principal concatenated with -a `(string-ascii 40)` *contract name* that identifies the code body. - -The `principal-construct?` function allows users to create either standard or contract principals, -depending on which form is used. To create a standard principal, -`principal-construct?` would be called with two arguments: it -takes as input a `(buff 1)` which encodes the principal address's -`version-byte`, a `(buff 20)` which encodes the principal address's `hash-bytes`. -To create a contract principal, `principal-construct?` would be called with -three arguments: the `(buff 1)` and `(buff 20)` to represent the standard principal -that created the contract, and a `(string-ascii 40)` which encodes the contract's name. -On success, this function returns either a standard principal or contract principal, -depending on whether or not the third `(string-ascii 40)` argument is given. - -This function returns a `Response`. On success, the `ok` value is a `Principal`. -The `err` value is a value tuple with the form `{ error_code: uint, value: (optional principal) }`. 
- -If the single-byte `version-byte` is in the valid range `0x00` to `0x1f`, but is not an appropriate -version byte for the current network, then the error will be `u0`, and `value` will contain -`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, -however, then the `value` will be `none`. - -If the `version-byte` is a `buff` of length 0, if the single-byte `version-byte` is a -value greater than `0x1f`, or the `hash-bytes` is a `buff` of length not equal to 20, then `error_code` -will be `u1` and `value` will be `None`. - -If a name is given, and the name is either an empty string or contains ASCII characters -that are not allowed in contract names, then `error_code` will be `u2`. - -Note: This function is only available starting with Stacks 2.1.", - example: r#" -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK) -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK.foo) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY)))) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY.foo)))) -(principal-construct? 0x 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a3) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x20 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) -(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "") ;; Returns (err (tuple (error_code u2) (value none))) -(principal-construct? 
0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo[") ;; Returns (err (tuple (error_code u2) (value none))) -"#, -}; - const STRING_TO_INT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "string-to-int? ${1:string}", @@ -577,7 +525,7 @@ const XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Xor"), + name: None, snippet: "bit-xor ${1:expr-1} ${2:expr-2}", signature: "(bit-xor i1 i2...)", description: @@ -591,7 +539,7 @@ const BITWISE_XOR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_AND_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise And"), + name: None, snippet: "bit-and ${1:expr-1} ${2:expr-2}", signature: "(bit-and i1 i2...)", description: "Returns the result of bitwise and'ing a variable number of integer inputs.", @@ -604,7 +552,7 @@ const BITWISE_AND_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_OR_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Or"), + name: None, snippet: "bit-or ${1:expr-1} ${2:expr-2}", signature: "(bit-or i1 i2...)", description: @@ -617,7 +565,7 @@ const BITWISE_OR_API: SimpleFunctionAPI = SimpleFunctionAPI { }; const BITWISE_NOT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Not"), + name: None, snippet: "bit-not ${1:expr-1}", signature: "(bit-not i1)", description: "Returns the one's compliement (sometimes also called the bitwise compliment or not operator) of `i1`, effectively reversing the bits in `i1`. @@ -631,7 +579,7 @@ In other words, every bit that is `1` in ì1` will be `0` in the result. 
Conver }; const BITWISE_LEFT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Left Shift"), + name: None, snippet: "bit-shift-left ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-left i1 shamt)", description: "Shifts all the bits in `i1` to the left by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). @@ -651,7 +599,7 @@ should use `*`, `/`, and `pow` instead of the shift operators. }; const BITWISE_RIGHT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { - name: Some("Bitwise Right Shift"), + name: None, snippet: "bit-shift-right ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-right i1 shamt)", description: "Shifts all the bits in `i1` to the right by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). @@ -892,7 +840,7 @@ fn make_for_simple_native( } else { panic!( "Attempted to auto-generate docs for non-simple native function: {:?}", - api.name + name ) } }; @@ -1817,6 +1765,59 @@ The `addrs` list contains the same PoX address values passed into the PoX smart " }; +const PRINCIPAL_CONSTRUCT_API: SpecialAPI = SpecialAPI { + input_type: "(buff 1), (buff 20), [(string-ascii 40)]", + output_type: "(response principal { error_code: uint, principal: (option principal) })", + snippet: "principal-construct? ${1:version} ${2:pub-key-hash}", + signature: "(principal-construct? (buff 1) (buff 20) [(string-ascii 40)])", + description: "A principal value represents either a set of keys, or a smart contract. +The former, called a _standard principal_, +is encoded as a `(buff 1)` *version byte*, indicating the type of account +and the type of network that this principal can spend tokens on, +and a `(buff 20)` *public key hash*, characterizing the principal's unique identity. +The latter, a _contract principal_, is encoded as a standard principal concatenated with +a `(string-ascii 40)` *contract name* that identifies the code body. 
+ +The `principal-construct?` function allows users to create either standard or contract principals, +depending on which form is used. To create a standard principal, +`principal-construct?` would be called with two arguments: it +takes as input a `(buff 1)` which encodes the principal address's +`version-byte`, a `(buff 20)` which encodes the principal address's `hash-bytes`. +To create a contract principal, `principal-construct?` would be called with +three arguments: the `(buff 1)` and `(buff 20)` to represent the standard principal +that created the contract, and a `(string-ascii 40)` which encodes the contract's name. +On success, this function returns either a standard principal or contract principal, +depending on whether or not the third `(string-ascii 40)` argument is given. + +This function returns a `Response`. On success, the `ok` value is a `Principal`. +The `err` value is a value tuple with the form `{ error_code: uint, value: (optional principal) }`. + +If the single-byte `version-byte` is in the valid range `0x00` to `0x1f`, but is not an appropriate +version byte for the current network, then the error will be `u0`, and `value` will contain +`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, +however, then the `value` will be `none`. + +If the `version-byte` is a `buff` of length 0, if the single-byte `version-byte` is a +value greater than `0x1f`, or the `hash-bytes` is a `buff` of length not equal to 20, then `error_code` +will be `u1` and `value` will be `None`. + +If a name is given, and the name is either an empty string or contains ASCII characters +that are not allowed in contract names, then `error_code` will be `u2`. + +Note: This function is only available starting with Stacks 2.1.", + example: r#" +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK) +(principal-construct? 
0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (ok ST3X6QWWETNBZWGBK6DRGTR1KX50S74D3425Q1TPK.foo) +(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY)))) +(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo") ;; Returns (err (tuple (error_code u0) (value (some SP3X6QWWETNBZWGBK6DRGTR1KX50S74D3433WDGJY.foo)))) +(principal-construct? 0x 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x16 0xfa6bf38ed557fe417333710d6033e9419391a3) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x20 0xfa6bf38ed557fe417333710d6033e9419391a320) ;; Returns (err (tuple (error_code u1) (value none))) +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "") ;; Returns (err (tuple (error_code u2) (value none))) +(principal-construct? 0x1a 0xfa6bf38ed557fe417333710d6033e9419391a320 "foo[") ;; Returns (err (tuple (error_code u2) (value none))) +"#, +}; + const DEFINE_TOKEN_API: DefineAPI = DefineAPI { input_type: "TokenName, ", snippet: "define-fungible-token ${1:token-name} ${2:total-supply}", @@ -2414,35 +2415,35 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { use crate::vm::functions::NativeFunctions::*; let name = function.get_name(); match function { - Add => make_for_simple_native(&ADD_API, &Add, name), - ToUInt => make_for_simple_native(&TO_UINT_API, &ToUInt, name), - ToInt => make_for_simple_native(&TO_INT_API, &ToInt, name), - Subtract => make_for_simple_native(&SUB_API, &Subtract, name), - Multiply => make_for_simple_native(&MUL_API, &Multiply, name), - Divide => make_for_simple_native(&DIV_API, &Divide, name), - BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &BuffToIntLe, name), - BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, &BuffToUIntLe, name), - 
BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &BuffToIntBe, name), - BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &BuffToUIntBe, name), - IsStandard => make_for_simple_native(&IS_STANDARD_API, &IsStandard, name), - PrincipalDestruct => make_for_simple_native(&principal_destruct_API, &IsStandard, name), - PrincipalConstruct => make_for_simple_native(&PRINCIPAL_CONSTRUCT_API, &IsStandard, name), - StringToInt => make_for_simple_native(&STRING_TO_INT_API, &StringToInt, name), - StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &StringToUInt, name), - IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &IntToAscii, name), - IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &IntToUtf8, name), - CmpGeq => make_for_simple_native(&GEQ_API, &CmpGeq, name), - CmpLeq => make_for_simple_native(&LEQ_API, &CmpLeq, name), - CmpLess => make_for_simple_native(&LESS_API, &CmpLess, name), - CmpGreater => make_for_simple_native(&GREATER_API, &CmpGreater, name), - Modulo => make_for_simple_native(&MOD_API, &Modulo, name), - Power => make_for_simple_native(&POW_API, &Power, name), - Sqrti => make_for_simple_native(&SQRTI_API, &Sqrti, name), - Log2 => make_for_simple_native(&LOG2_API, &Log2, name), - BitwiseXor => make_for_simple_native(&XOR_API, &BitwiseXor, name), - And => make_for_simple_native(&AND_API, &And, name), - Or => make_for_simple_native(&OR_API, &Or, name), - Not => make_for_simple_native(&NOT_API, &Not, name), + Add => make_for_simple_native(&ADD_API, &function, name), + ToUInt => make_for_simple_native(&TO_UINT_API, &function, name), + ToInt => make_for_simple_native(&TO_INT_API, &function, name), + Subtract => make_for_simple_native(&SUB_API, &function, name), + Multiply => make_for_simple_native(&MUL_API, &function, name), + Divide => make_for_simple_native(&DIV_API, &function, name), + BuffToIntLe => make_for_simple_native(&BUFF_TO_INT_LE_API, &function, name), + BuffToUIntLe => make_for_simple_native(&BUFF_TO_UINT_LE_API, 
&function, name), + BuffToIntBe => make_for_simple_native(&BUFF_TO_INT_BE_API, &function, name), + BuffToUIntBe => make_for_simple_native(&BUFF_TO_UINT_BE_API, &function, name), + IsStandard => make_for_simple_native(&IS_STANDARD_API, &function, name), + PrincipalDestruct => make_for_simple_native(&PRINCPIPAL_DESTRUCT_API, &function, name), + PrincipalConstruct => make_for_special(&PRINCIPAL_CONSTRUCT_API, &function), + StringToInt => make_for_simple_native(&STRING_TO_INT_API, &function, name), + StringToUInt => make_for_simple_native(&STRING_TO_UINT_API, &function, name), + IntToAscii => make_for_simple_native(&INT_TO_ASCII_API, &function, name), + IntToUtf8 => make_for_simple_native(&INT_TO_UTF8_API, &function, name), + CmpGeq => make_for_simple_native(&GEQ_API, &function, name), + CmpLeq => make_for_simple_native(&LEQ_API, &function, name), + CmpLess => make_for_simple_native(&LESS_API, &function, name), + CmpGreater => make_for_simple_native(&GREATER_API, &function, name), + Modulo => make_for_simple_native(&MOD_API, &function, name), + Power => make_for_simple_native(&POW_API, &function, name), + Sqrti => make_for_simple_native(&SQRTI_API, &function, name), + Log2 => make_for_simple_native(&LOG2_API, &function, name), + BitwiseXor => make_for_simple_native(&XOR_API, &function, name), + And => make_for_simple_native(&AND_API, &function, name), + Or => make_for_simple_native(&OR_API, &function, name), + Not => make_for_simple_native(&NOT_API, &function, name), Equals => make_for_special(&EQUALS_API, function), If => make_for_special(&IF_API, function), Let => make_for_special(&LET_API, function), @@ -2506,20 +2507,20 @@ pub fn make_api_reference(function: &NativeFunctions) -> FunctionAPI { BurnAsset => make_for_special(&BURN_ASSET, function), GetTokenSupply => make_for_special(&GET_TOKEN_SUPPLY, function), AtBlock => make_for_special(&AT_BLOCK, function), - GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &GetStxBalance, name), - StxGetAccount => 
make_for_simple_native(&STX_GET_ACCOUNT, &StxGetAccount, name), + GetStxBalance => make_for_simple_native(&STX_GET_BALANCE, &function, name), + StxGetAccount => make_for_simple_native(&STX_GET_ACCOUNT, &function, name), StxTransfer => make_for_special(&STX_TRANSFER, function), StxTransferMemo => make_for_special(&STX_TRANSFER_MEMO, function), - StxBurn => make_for_simple_native(&STX_BURN, &StxBurn, name), + StxBurn => make_for_simple_native(&STX_BURN, &function, name), ToConsensusBuff => make_for_special(&TO_CONSENSUS_BUFF, function), FromConsensusBuff => make_for_special(&FROM_CONSENSUS_BUFF, function), ReplaceAt => make_for_special(&REPLACE_AT, function), - BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &BitwiseXor2, name), - BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &BitwiseAnd, name), - BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &BitwiseOr, name), - BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &BitwiseNot, name), - BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &BitwiseLShift, name), - BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &BitwiseRShift, name), + BitwiseXor2 => make_for_simple_native(&BITWISE_XOR_API, &function, name), + BitwiseAnd => make_for_simple_native(&BITWISE_AND_API, &function, name), + BitwiseOr => make_for_simple_native(&BITWISE_OR_API, &function, name), + BitwiseNot => make_for_simple_native(&BITWISE_NOT_API, &function, name), + BitwiseLShift => make_for_simple_native(&BITWISE_LEFT_SHIFT_API, &function, name), + BitwiseRShift => make_for_simple_native(&BITWISE_RIGHT_SHIFT_API, &function, name), } } @@ -2763,7 +2764,15 @@ mod test { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/clarity/src/vm/events.rs b/clarity/src/vm/events.rs index 76892e5d0e..0a2f04b517 
100644 --- a/clarity/src/vm/events.rs +++ b/clarity/src/vm/events.rs @@ -224,7 +224,7 @@ impl NFTTransferEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -249,7 +249,7 @@ impl NFTMintEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -273,7 +273,7 @@ impl NFTBurnEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; @@ -349,7 +349,7 @@ impl SmartContractEventData { pub fn json_serialize(&self) -> serde_json::Value { let raw_value = { let mut bytes = vec![]; - self.value.consensus_serialize(&mut bytes).unwrap(); + self.value.serialize_write(&mut bytes).unwrap(); let formatted_bytes: Vec = bytes.iter().map(|b| format!("{:02x}", b)).collect(); formatted_bytes }; diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 06d0ffc4a6..0b76317a83 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -104,7 +104,7 @@ pub fn special_stx_balance( if let Value::Principal(ref principal) = owner { let balance = { - let snapshot = env + let mut snapshot = env .global_context .database .get_stx_balance_snapshot(principal); @@ -147,7 +147,7 @@ pub fn stx_transfer_consolidated( env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; 
env.add_memory(STXBalance::unlocked_and_v1_size as u64)?; - let sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from); + let mut sender_snapshot = env.global_context.database.get_stx_balance_snapshot(from); if !sender_snapshot.can_transfer(amount) { return clarity_ecode!(StxErrorCodes::NOT_ENOUGH_BALANCE); } @@ -234,6 +234,7 @@ pub fn special_stx_account( .get_stx_balance_snapshot(&principal) .canonical_balance_repr(); let v1_unlock_ht = env.global_context.database.get_v1_unlock_height(); + let v2_unlock_ht = env.global_context.database.get_v2_unlock_height(); TupleData::from_data(vec![ ( @@ -246,7 +247,7 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht) as u128), + Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht) as u128), ), ]) .map(|t| Value::Tuple(t)) @@ -404,12 +405,14 @@ pub fn special_mint_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; let asset_identifier = AssetIdentifier { @@ -467,12 +470,14 @@ pub fn special_mint_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; let asset_identifier = AssetIdentifier { @@ -542,12 +547,14 @@ pub fn special_transfer_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, 
to_principal, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -628,12 +635,14 @@ pub fn special_transfer_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.set_nft_owner( &env.contract_context.contract_identifier, asset_name, &asset, to_principal, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -1015,11 +1024,13 @@ pub fn special_burn_asset_v200( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(expected_asset_type.size() as u64)?; + let epoch = env.epoch().clone(); env.global_context.database.burn_nft( &env.contract_context.contract_identifier, asset_name, &asset, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( @@ -1092,11 +1103,13 @@ pub fn special_burn_asset_v205( env.add_memory(TypeSignature::PrincipalType.size() as u64)?; env.add_memory(asset_size)?; + let epoch = env.epoch().clone(); env.global_context.database.burn_nft( &env.contract_context.contract_identifier, asset_name, &asset, expected_asset_type, + &epoch, )?; env.global_context.log_asset_transfer( diff --git a/clarity/src/vm/functions/conversions.rs b/clarity/src/vm/functions/conversions.rs index a6cec48dd8..40a373e789 100644 --- a/clarity/src/vm/functions/conversions.rs +++ b/clarity/src/vm/functions/conversions.rs @@ -17,6 +17,7 @@ use std::convert::TryFrom; use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::StacksEpochId; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::runtime_cost; @@ -217,7 +218,12 @@ pub fn native_int_to_utf8(value: Value) -> Result { /// If the value cannot fit as serialized into the maximum buffer size, /// this returns `none`, otherwise, it will be `(some consensus-serialized-buffer)` pub fn to_consensus_buff(value: Value) -> Result { - let clar_buff_serialized = match 
Value::buff_from(value.serialize_to_vec()) { + let mut clar_buff_serialized = vec![]; + value + .serialize_write(&mut clar_buff_serialized) + .expect("FATAL: failed to serialize to vec"); + + let clar_buff_serialized = match Value::buff_from(clar_buff_serialized) { Ok(x) => x, Err(_) => return Ok(Value::none()), }; @@ -261,7 +267,11 @@ pub fn from_consensus_buff( // Perform the deserialization and check that it deserialized to the expected // type. A type mismatch at this point is an error that should be surfaced in // Clarity (as a none return). - let result = match Value::try_deserialize_bytes_exact(&input_bytes, &type_arg) { + let result = match Value::try_deserialize_bytes_exact( + &input_bytes, + &type_arg, + env.epoch().value_sanitizing(), + ) { Ok(value) => value, Err(_) => return Ok(Value::none()), }; diff --git a/clarity/src/vm/functions/database.rs b/clarity/src/vm/functions/database.rs index 98d6e28775..dda4e1debd 100644 --- a/clarity/src/vm/functions/database.rs +++ b/clarity/src/vm/functions/database.rs @@ -203,6 +203,11 @@ pub fn special_contract_call( nested_env.execute_contract(&contract_identifier, function_name, &rest_args, false) }?; + // sanitize contract-call outputs in epochs >= 2.4 + let result_type = TypeSignature::type_of(&result); + let (result, _) = Value::sanitize_value(env.epoch(), &result_type, result) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + // Ensure that the expected type from the trait spec admits // the type of the value returned by the dynamic dispatch. 
if let Some(returns_type_signature) = type_returns_constraint { @@ -240,9 +245,10 @@ pub fn special_fetch_variable_v200( data_types.value_type.size(), )?; + let epoch = env.epoch().clone(); env.global_context .database - .lookup_variable(contract, var_name, data_types) + .lookup_variable(contract, var_name, data_types, &epoch) } /// The Stacks v205 version of fetch_variable uses the actual stored size of the @@ -264,10 +270,11 @@ pub fn special_fetch_variable_v205( .get(var_name) .ok_or(CheckErrors::NoSuchDataVariable(var_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .lookup_variable_with_size(contract, var_name, data_types); + .lookup_variable_with_size(contract, var_name, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -310,9 +317,10 @@ pub fn special_set_variable_v200( env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context .database - .set_variable(contract, var_name, value, data_types) + .set_variable(contract, var_name, value, data_types, &epoch) .map(|data| data.value) } @@ -341,10 +349,11 @@ pub fn special_set_variable_v205( .get(var_name) .ok_or(CheckErrors::NoSuchDataVariable(var_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .set_variable(contract, var_name, value, data_types); + .set_variable(contract, var_name, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -383,9 +392,10 @@ pub fn special_fetch_entry_v200( data_types.value_type.size() + data_types.key_type.size(), )?; + let epoch = env.epoch().clone(); env.global_context .database - .fetch_entry(contract, map_name, &key, data_types) + .fetch_entry(contract, map_name, &key, data_types, &epoch) } /// The Stacks v205 version of fetch_entry uses the actual stored size of the @@ -409,10 +419,11 @@ pub fn special_fetch_entry_v205( .get(map_name) 
.ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .fetch_entry_with_size(contract, map_name, &key, data_types); + .fetch_entry_with_size(contract, map_name, &key, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -485,9 +496,10 @@ pub fn special_set_entry_v200( env.add_memory(key.get_memory_use())?; env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context .database - .set_entry(contract, map_name, key, value, data_types) + .set_entry(contract, map_name, key, value, data_types, &epoch) .map(|data| data.value) } @@ -518,10 +530,11 @@ pub fn special_set_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .set_entry(contract, map_name, key, value, data_types); + .set_entry(contract, map_name, key, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -569,9 +582,11 @@ pub fn special_insert_entry_v200( env.add_memory(key.get_memory_use())?; env.add_memory(value.get_memory_use())?; + let epoch = env.epoch().clone(); + env.global_context .database - .insert_entry(contract, map_name, key, value, data_types) + .insert_entry(contract, map_name, key, value, data_types, &epoch) .map(|data| data.value) } @@ -602,10 +617,11 @@ pub fn special_insert_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .insert_entry(contract, map_name, key, value, data_types); + .insert_entry(contract, map_name, key, value, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -650,9 +666,10 @@ pub fn special_delete_entry_v200( env.add_memory(key.get_memory_use())?; + let epoch = env.epoch().clone(); env.global_context 
.database - .delete_entry(contract, map_name, &key, data_types) + .delete_entry(contract, map_name, &key, data_types, &epoch) .map(|data| data.value) } @@ -681,10 +698,11 @@ pub fn special_delete_entry_v205( .get(map_name) .ok_or(CheckErrors::NoSuchMap(map_name.to_string()))?; + let epoch = env.epoch().clone(); let result = env .global_context .database - .delete_entry(contract, map_name, &key, data_types); + .delete_entry(contract, map_name, &key, data_types, &epoch); let result_size = match &result { Ok(data) => data.serialized_byte_len, @@ -875,11 +893,12 @@ pub fn special_get_burn_block_info( TupleData::from_data(vec![ ( "addrs".into(), - Value::list_from( + Value::cons_list( addrs .into_iter() .map(|addr_tuple| Value::Tuple(addr_tuple)) .collect(), + env.epoch(), ) .expect("FATAL: could not convert address list to Value"), ), diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index b44a9454e9..b943c40ab9 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -55,6 +55,12 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch2_05 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.1. StacksEpochId::Epoch21 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.2. + StacksEpochId::Epoch22 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.3. + StacksEpochId::Epoch23 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.4. 
+ StacksEpochId::Epoch24 => $Epoch205Version(args, env, context), } } }; diff --git a/clarity/src/vm/functions/sequences.rs b/clarity/src/vm/functions/sequences.rs index 780750e7db..be6209895e 100644 --- a/clarity/src/vm/functions/sequences.rs +++ b/clarity/src/vm/functions/sequences.rs @@ -47,7 +47,7 @@ pub fn list_cons( runtime_cost(ClarityCostFunction::ListCons, env, arg_size)?; - Value::list_from(args) + Value::cons_list(args, env.epoch()) } pub fn special_filter( @@ -169,7 +169,7 @@ pub fn special_map( mapped_results.push(res); } - Value::list_from(mapped_results) + Value::cons_list(mapped_results, env.epoch()) } pub fn special_append( @@ -196,11 +196,14 @@ pub fn special_append( )?; if entry_type.is_no_type() { assert_eq!(size, 0); - return Value::list_from(vec![element]); + return Value::cons_list(vec![element], env.epoch()); } if let Ok(next_entry_type) = TypeSignature::least_supertype(env.epoch(), &entry_type, &element_type) { + let (element, _) = Value::sanitize_value(env.epoch(), &next_entry_type, element) + .ok_or_else(|| CheckErrors::ListTypesMustMatch)?; + let next_type_signature = ListTypeData::new_list(next_entry_type, size + 1)?; data.push(element); Ok(Value::Sequence(SequenceData::List(ListData { @@ -225,7 +228,7 @@ pub fn special_concat_v200( check_argument_count(2, args)?; let mut wrapped_seq = eval(&args[0], env, context)?; - let mut other_wrapped_seq = eval(&args[1], env, context)?; + let other_wrapped_seq = eval(&args[1], env, context)?; runtime_cost( ClarityCostFunction::Concat, @@ -233,9 +236,9 @@ pub fn special_concat_v200( u64::from(wrapped_seq.size()).cost_overflow_add(u64::from(other_wrapped_seq.size()))?, )?; - match (&mut wrapped_seq, &mut other_wrapped_seq) { - (Value::Sequence(ref mut seq), Value::Sequence(ref mut other_seq)) => { - seq.append(env.epoch(), other_seq) + match (&mut wrapped_seq, other_wrapped_seq) { + (Value::Sequence(ref mut seq), Value::Sequence(other_seq)) => { + seq.concat(env.epoch(), other_seq) } _ => 
Err(RuntimeErrorType::BadTypeConstruction.into()), }?; @@ -251,17 +254,17 @@ pub fn special_concat_v205( check_argument_count(2, args)?; let mut wrapped_seq = eval(&args[0], env, context)?; - let mut other_wrapped_seq = eval(&args[1], env, context)?; + let other_wrapped_seq = eval(&args[1], env, context)?; - match (&mut wrapped_seq, &mut other_wrapped_seq) { - (Value::Sequence(ref mut seq), Value::Sequence(ref mut other_seq)) => { + match (&mut wrapped_seq, other_wrapped_seq) { + (Value::Sequence(ref mut seq), Value::Sequence(other_seq)) => { runtime_cost( ClarityCostFunction::Concat, env, (seq.len() as u64).cost_overflow_add(other_seq.len() as u64)?, )?; - seq.append(env.epoch(), other_seq) + seq.concat(env.epoch(), other_seq) } _ => { runtime_cost(ClarityCostFunction::Concat, env, 1)?; @@ -383,7 +386,8 @@ pub fn special_slice( env, (right_position - left_position) * seq.element_size(), )?; - let seq_value = seq.slice(left_position as usize, right_position as usize)?; + let seq_value = + seq.slice(env.epoch(), left_position as usize, right_position as usize)?; Value::some(seq_value) } _ => return Err(RuntimeErrorType::BadTypeConstruction.into()), diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index e2440b475d..2941af2b1f 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -179,12 +179,15 @@ fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> env, context.depth(), )?; - if let Some(value) = context - .lookup_variable(name) - .or_else(|| env.contract_context.lookup_variable(name)) - { + if let Some(value) = context.lookup_variable(name) { runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?; Ok(value.clone()) + } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size())?; + let (value, _) = + Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value), value) + .ok_or_else(|| 
CheckErrors::CouldNotDetermineType)?; + Ok(value) } else if let Some(callable_data) = context.lookup_callable_contract(name) { if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { Ok(callable_data.contract_identifier.clone().into()) @@ -406,7 +409,7 @@ pub fn eval_all( global_context.add_memory(value.size() as u64)?; let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type); - global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type)?; + global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type, &global_context.epoch_id)?; contract_context.meta_data_var.insert(name, data_type); }, diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 35670c44a0..6a4e49dc5d 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -40,6 +40,24 @@ pub const TEST_BURN_STATE_DB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { ast_rules: ASTRules::PrecheckSize, }; +pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnStateDB { + match epoch_id { + StacksEpochId::Epoch20 => UnitTestBurnStateDB { + epoch_id, + ast_rules: ASTRules::Typical, + }, + StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => UnitTestBurnStateDB { + epoch_id, + ast_rules: ASTRules::PrecheckSize, + }, + _ => panic!("Epoch {} not covered", &epoch_id), + } +} + pub fn execute(s: &str) -> Value { vm_execute(s).unwrap().unwrap() } @@ -208,7 +226,15 @@ impl BurnStateDB for UnitTestBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/clarity/src/vm/tests/assets.rs 
b/clarity/src/vm/tests/assets.rs index 39056b69ff..848616c4b2 100644 --- a/clarity/src/vm/tests/assets.rs +++ b/clarity/src/vm/tests/assets.rs @@ -18,18 +18,16 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::hex_bytes; use crate::vm::ast::ASTRules; -use crate::vm::contexts::{AssetMap, AssetMapEntry, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; +use crate::vm::contexts::{AssetMap, AssetMapEntry, OwnedEnvironment}; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::events::StacksTransactionEvent; -use crate::vm::execute as vm_execute; use crate::vm::representations::SymbolicExpression; +use crate::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use crate::vm::tests::{ - execute, is_committed, is_err_code, symbols_from_values, with_memory_environment, -}; -use crate::vm::types::{ - AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, + test_clarity_versions, test_epochs, tl_env_factory as env_factory, + TopLevelMemoryEnvironmentGenerator, }; +use crate::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier, Value}; use crate::vm::version::ClarityVersion; use crate::vm::ContractContext; @@ -138,7 +136,9 @@ fn execute_transaction( env.execute_transaction(issuer, None, contract_identifier.clone(), tx, args) } -fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_native_stx_ops(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = env_factory.get_env(epoch); let contract = r#"(define-public (burn-stx (amount uint) (p principal)) (stx-burn? amount p)) (define-public (xfer-stx (amount uint) (p principal) (t principal)) (stx-transfer? 
amount p t)) (define-read-only (balance-stx (p principal)) (stx-get-balance p)) @@ -205,7 +205,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 1: send 0 let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id, "xfer-stx", @@ -217,7 +217,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id, "burn-stx", @@ -231,7 +231,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 2: from = to let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -245,7 +245,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 3: sender is not tx-sender let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -257,7 +257,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -271,7 +271,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 4: amount > balance let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -283,7 +283,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -299,7 +299,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // will overflow before such an overflowing transfer is allowed. 
// assert_eq!( // execute_transaction( - // owned_env, + // &mut owned_env, // p2.clone(), // &token_contract_id, // "xfer-stx", @@ -312,7 +312,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // test 6: check balance let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -330,7 +330,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { let nonexistent_principal = Value::Principal(PrincipalData::Standard(sp_data)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -343,7 +343,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, let's actually do a couple transfers/burns and check the asset maps. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "burn-stx", @@ -359,7 +359,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "xfer-stx", @@ -375,7 +375,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "xfer-stx", @@ -393,7 +393,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // let's try a user -> contract transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "to-contract", @@ -417,7 +417,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { let contract_principal = Value::Principal(cp_data); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id, "balance-stx", @@ -430,7 +430,7 @@ fn 
test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now let's do a contract -> user transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "from-contract", @@ -457,7 +457,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, to transfer let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &second_contract_id, "send-to-other", @@ -478,7 +478,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // now, let's send some back let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p3_principal.clone(), &token_contract_id, "from-contract", @@ -497,7 +497,7 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { // and, one more time for good measure let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &second_contract_id, "send-to-other", @@ -516,7 +516,12 @@ fn test_native_stx_ops(owned_env: &mut OwnedEnvironment) { ); } -fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_simple_token_system( + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let p1 = execute("'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"); @@ -557,7 +562,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -569,7 +574,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ 
-585,7 +590,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -597,7 +602,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -609,7 +614,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let err = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-token-transfer", @@ -623,7 +628,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { }); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -635,7 +640,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -647,7 +652,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -664,7 +669,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -680,7 +685,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, 
p1_principal.clone(), &token_contract_id.clone(), "faucet", @@ -696,7 +701,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -708,7 +713,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get the total supply - Total minted so far = 10204 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "get-total-supply", @@ -719,7 +724,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Burn 100 tokens from p2's balance (out of 9200) let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -736,7 +741,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get p2's balance we should get 9200 - 100 = 9100 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "my-ft-get-balance", @@ -748,7 +753,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Get the new total supply let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "get-total-supply", @@ -759,7 +764,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Burn 9101 tokens from p2's balance (out of 9100) - Should fail with error code 1 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -772,7 +777,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Try to burn 0 tokens from p2's balance - Should fail with error code 1 let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut 
owned_env, p2_principal.clone(), &token_contract_id.clone(), "burn", @@ -786,7 +791,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { // Try to burn 1 tokens from p2's balance (out of 9100) - Should pass even though // sender != tx sender let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "burn", @@ -802,7 +807,7 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { ); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "mint-after", @@ -814,7 +819,9 @@ fn test_simple_token_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); } -fn test_total_supply(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_total_supply(epoch: StacksEpochId, mut env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = env_factory.get_env(epoch); let bad_0 = "(define-fungible-token stackaroos (- 5))"; let bad_1 = "(define-fungible-token stackaroos true)"; @@ -879,7 +886,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -889,7 +896,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -899,7 +906,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(!is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -909,7 +916,7 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let err = 
execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &token_contract_id.clone(), "gated-faucet", @@ -923,7 +930,12 @@ fn test_total_supply(owned_env: &mut OwnedEnvironment) { }); } -fn test_overlapping_nfts(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_overlapping_nfts( + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let names_contract = ASSET_NAMES; @@ -967,7 +979,13 @@ fn test_overlapping_nfts(owned_env: &mut OwnedEnvironment) { .unwrap(); } -fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { +#[apply(test_clarity_versions)] +fn test_simple_naming_system( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: TopLevelMemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let tokens_contract = FIRST_CLASS_TOKENS; let names_contract = ASSET_NAMES; @@ -990,10 +1008,8 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { _ => panic!(), }; - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); let tokens_contract_id = QualifiedContractIdentifier::new(p1_std_principal_data.clone(), "tokens".into()); @@ -1035,7 +1051,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { .unwrap(); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1046,7 +1062,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_err_code(&result, 1)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "preorder", @@ -1057,7 +1073,7 @@ fn 
test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "preorder", @@ -1070,7 +1086,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // shouldn't be able to register a name you didn't preorder! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1083,7 +1099,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // should work! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "register", @@ -1105,7 +1121,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's try some token-transfers let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "try-bad-transfers", @@ -1116,7 +1132,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "try-bad-transfers-but-ok", @@ -1135,7 +1151,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's mint some names let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1147,7 +1163,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1161,7 +1177,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // let's transfer name let (result, asset_map, _events) 
= execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1173,7 +1189,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1185,7 +1201,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "transfer", @@ -1197,7 +1213,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert_eq!(asset_map.to_table().len(), 0); let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "transfer", @@ -1220,7 +1236,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // try to underpay! let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1231,7 +1247,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1244,7 +1260,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // register a cheap name! 
let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "preorder", @@ -1255,7 +1271,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1266,7 +1282,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { assert!(is_committed(&result)); let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "register", @@ -1279,7 +1295,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 burning 5 should fail (not owner anymore). let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-burn", @@ -1292,7 +1308,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 minting 8 should succeed let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1305,7 +1321,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 burning 8 (which belongs to p1) should succeed even though sender != tx_sender. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1323,7 +1339,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 burning 5 should succeed. let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1341,7 +1357,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p2 re-burning 5 should succeed. 
let (result, _asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p2_principal.clone(), &names_contract_id, "force-burn", @@ -1353,7 +1369,7 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { // p1 re-minting 5 should succeed let (result, asset_map, _events) = execute_transaction( - owned_env, + &mut owned_env, p1_principal.clone(), &names_contract_id, "force-mint", @@ -1373,17 +1389,3 @@ fn test_simple_naming_system(owned_env: &mut OwnedEnvironment) { ); } } - -#[test] -fn test_all() { - let to_test = [ - test_overlapping_nfts, - test_simple_token_system, - test_simple_naming_system, - test_total_supply, - test_native_stx_ops, - ]; - for test in to_test.iter() { - with_memory_environment(test, StacksEpochId::latest(), true); - } -} diff --git a/clarity/src/vm/tests/contracts.rs b/clarity/src/vm/tests/contracts.rs index 455683d1ae..f230b39a25 100644 --- a/clarity/src/vm/tests/contracts.rs +++ b/clarity/src/vm/tests/contracts.rs @@ -27,18 +27,15 @@ use crate::types::chainstate::StacksBlockId; use crate::vm::ast; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::ASTRules; -use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; -use crate::vm::costs::ExecutionCost; -use crate::vm::database::ClarityDatabase; +use crate::vm::contexts::Environment; use crate::vm::database::MemoryBackingStore; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute as vm_execute; -use crate::vm::representations::SymbolicExpression; use crate::vm::tests::{ - execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, - with_memory_environment, BurnStateDB, TEST_BURN_STATE_DB, TEST_HEADER_DB, + env_factory, execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, + tl_env_factory, MemoryEnvironmentGenerator, TopLevelMemoryEnvironmentGenerator, }; +use crate::vm::tests::{test_clarity_versions, test_epochs}; use 
crate::vm::types::serialization::TypePrefix::Buffer; use crate::vm::types::BuffData; use crate::vm::types::{ @@ -107,8 +104,11 @@ fn get_principal_as_principal_data() -> PrincipalData { StandardPrincipalData::transient().into() } -#[test] -fn test_get_block_info_eval() { +#[apply(test_epochs)] +fn test_get_block_info_eval( + epoch: StacksEpochId, + mut tl_env_factory: TopLevelMemoryEnvironmentGenerator, +) { let contracts = [ "(define-private (test-func) (get-block-info? time u1))", "(define-private (test-func) (get-block-info? time block-height))", @@ -136,10 +136,10 @@ fn test_get_block_info_eval() { ClarityVersion::Clarity2, ); + let mut owned_env = tl_env_factory.get_env(epoch); for i in 0..contracts.len() { - let mut marf = MemoryBackingStore::new(); - let mut owned_env = OwnedEnvironment::new(marf.as_clarity_db(), StacksEpochId::latest()); - let contract_identifier = QualifiedContractIdentifier::local("test-contract").unwrap(); + let contract_identifier = + QualifiedContractIdentifier::local(&format!("test-contract-{}", i)).unwrap(); owned_env .initialize_contract( contract_identifier.clone(), @@ -172,11 +172,9 @@ fn test_get_block_info_eval() { } } -fn test_block_headers(n: u8) -> StacksBlockId { - StacksBlockId([n as u8; 32]) -} - -fn test_contract_caller(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_contract_caller(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + let mut owned_env = env_factory.get_env(epoch); let contract_a = "(define-read-only (get-caller) (list contract-caller tx-sender))"; let contract_b = "(define-read-only (get-caller) @@ -227,7 +225,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![p1.clone(), p1.clone()]).unwrap() + Value::cons_list_unsanitized(vec![p1.clone(), p1.clone()]).unwrap() ); assert_eq!( env.execute_contract( @@ -237,7 +235,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - 
Value::list_from(vec![c_b.clone(), c_b.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), c_b.clone()]).unwrap() ); assert_eq!( env.execute_contract( @@ -247,7 +245,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![c_b.clone(), p1.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), p1.clone()]).unwrap() ); assert_eq!( env.execute_contract( @@ -257,7 +255,7 @@ fn test_contract_caller(owned_env: &mut OwnedEnvironment) { false ) .unwrap(), - Value::list_from(vec![c_b.clone(), c_b.clone()]).unwrap() + Value::cons_list_unsanitized(vec![c_b.clone(), c_b.clone()]).unwrap() ); } } @@ -275,7 +273,7 @@ fn tx_sponsor_contract_asserts(env: &mut Environment, sponsor: Option assert_eq!( - x, - RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( - vec![2 as u8; 32].as_slice() - )) - ), - _ => panic!("Unexpected error"), - } + let err = owned_env + .initialize_contract( + QualifiedContractIdentifier::local("contract").unwrap(), + &contract, + None, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + eprintln!("{}", err); + match err { + Error::Runtime(x, _) => assert_eq!( + x, + RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash::from( + vec![2 as u8; 32].as_slice() + )) + ), + _ => panic!("Unexpected error"), } - - with_memory_environment(test, StacksEpochId::latest(), true); } -#[test] -fn test_as_max_len() { - fn test(owned_env: &mut OwnedEnvironment) { - let contract = "(define-data-var token-ids (list 10 uint) (list)) +#[apply(test_epochs)] +fn test_as_max_len(epoch: StacksEpochId, mut tl_env_factory: TopLevelMemoryEnvironmentGenerator) { + let mut owned_env = tl_env_factory.get_env(epoch); + let contract = "(define-data-var token-ids (list 10 uint) (list)) (var-set token-ids (unwrap! (as-max-len? 
(append (var-get token-ids) u1) u10) (err 10)))"; - owned_env - .initialize_contract( - QualifiedContractIdentifier::local("contract").unwrap(), - &contract, - None, - ASTRules::PrecheckSize, - ) - .unwrap(); - } - - with_memory_environment(test, StacksEpochId::latest(), true); + owned_env + .initialize_contract( + QualifiedContractIdentifier::local("contract").unwrap(), + &contract, + None, + ASTRules::PrecheckSize, + ) + .unwrap(); } #[test] @@ -1077,8 +1093,13 @@ fn test_arg_stack_depth() { ); } -#[test] -fn test_cc_stack_depth() { +#[apply(test_clarity_versions)] +fn test_cc_stack_depth( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_one = "(define-public (foo) (ok (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ @@ -1093,33 +1114,30 @@ fn test_cc_stack_depth() { 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1) 1)) (bar) "; + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); + + let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); + env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + .unwrap(); - with_memory_environment( - |owned_env| { - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - - let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) - .unwrap(); - - let contract_identifier = 
QualifiedContractIdentifier::local("c-bar").unwrap(); - assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) - .unwrap_err(), - RuntimeErrorType::MaxStackDepthReached.into() - ); - }, - StacksEpochId::latest(), - false, + let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); + assert_eq!( + env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + .unwrap_err(), + RuntimeErrorType::MaxStackDepthReached.into() ); } -#[test] -fn test_cc_trait_stack_depth() { +#[apply(test_clarity_versions)] +fn test_cc_trait_stack_depth( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); + let contract_one = "(define-public (foo) (ok (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ (+ @@ -1137,43 +1155,18 @@ fn test_cc_trait_stack_depth() { (bar .c-foo) "; - with_memory_environment( - |owned_env| { - let mut placeholder_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity2, - ); - let mut env = owned_env.get_exec_environment(None, None, &mut placeholder_context); - - let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); - env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) - .unwrap(); - - let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); - assert_eq!( - env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) - .unwrap_err(), - RuntimeErrorType::MaxStackDepthReached.into() - ); - }, - StacksEpochId::latest(), - false, - ); -} + let mut placeholder_context = + ContractContext::new(QualifiedContractIdentifier::transient(), version); + let mut env = owned_env.get_exec_environment(None, None, 
&mut placeholder_context); -#[test] -fn test_all() { - let to_test = [ - test_factorial_contract, - test_aborts, - test_contract_caller, - test_tx_sponsor, - test_fully_qualified_contract_call, - test_simple_naming_system, - test_simple_contract_call, - ]; - for test in to_test.iter() { - eprintln!(".."); - with_memory_environment(test, StacksEpochId::latest(), false); - } + let contract_identifier = QualifiedContractIdentifier::local("c-foo").unwrap(); + env.initialize_contract(contract_identifier, contract_one, ASTRules::PrecheckSize) + .unwrap(); + + let contract_identifier = QualifiedContractIdentifier::local("c-bar").unwrap(); + assert_eq!( + env.initialize_contract(contract_identifier, contract_two, ASTRules::PrecheckSize) + .unwrap_err(), + RuntimeErrorType::MaxStackDepthReached.into() + ); } diff --git a/clarity/src/vm/tests/datamaps.rs b/clarity/src/vm/tests/datamaps.rs index 8c8c5728ed..00891ad68a 100644 --- a/clarity/src/vm/tests/datamaps.rs +++ b/clarity/src/vm/tests/datamaps.rs @@ -17,13 +17,10 @@ use std::convert::From; use std::convert::TryFrom; -use crate::vm::contexts::OwnedEnvironment; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType, ShortReturnType}; +use crate::vm::errors::{CheckErrors, Error, ShortReturnType}; use crate::vm::execute; use crate::vm::types::{ - ListData, QualifiedContractIdentifier, SequenceData, StandardPrincipalData, TupleData, - TupleTypeSignature, TypeSignature, Value, + ListData, SequenceData, TupleData, TupleTypeSignature, TypeSignature, Value, }; use crate::vm::ClarityName; diff --git a/clarity/src/vm/tests/defines.rs b/clarity/src/vm/tests/defines.rs index 3c28847bd5..aca37472cc 100644 --- a/clarity/src/vm/tests/defines.rs +++ b/clarity/src/vm/tests/defines.rs @@ -14,18 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::vm::tests::test_clarity_versions; + #[cfg(test)] use rstest::rstest; #[cfg(test)] use rstest_reuse::{self, *}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - use stacks_common::types::StacksEpochId; use crate::vm::ast::build_ast; @@ -61,7 +56,7 @@ fn test_defines() { assert_eq!(Ok(Some(Value::Int(1))), execute(&tests)); } -#[apply(test_clarity_versions_defines)] +#[apply(test_clarity_versions)] fn test_accept_options(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let defun = "(define-private (f (b (optional int))) (* 10 (default-to 0 b)))"; let tests = [ @@ -194,7 +189,7 @@ fn test_stack_depth() { }) } -#[apply(test_clarity_versions_defines)] +#[apply(test_clarity_versions)] fn test_recursive_panic(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = "(define-private (factorial (a int)) (if (is-eq a 0) diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 85f853fb66..518b063e41 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -13,36 +13,18 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use crate::vm::contexts::OwnedEnvironment; +use crate::vm::database::MemoryBackingStore; +use crate::vm::errors::Error; +use crate::vm::types::Value; -use stacks_common::consts::{ - BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, - BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; +use stacks_common::types::StacksEpochId; + +pub use crate::vm::database::BurnStateDB; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, -}; -use stacks_common::types::{StacksEpochId, PEER_VERSION_EPOCH_2_0}; -use stacks_common::util::hash::hex_bytes; -use super::events::StacksTransactionEvent; pub use super::test_util::*; use super::ClarityVersion; -use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; -use crate::vm::contracts::Contract; -pub use crate::vm::database::BurnStateDB; -use crate::vm::database::ClarityDatabase; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::Error; -use crate::vm::representations::SymbolicExpression; -use crate::vm::types::{PrincipalData, ResponseData, Value}; -use crate::vm::StacksEpoch; -use crate::vm::{ - analysis::AnalysisDatabase, - clarity::{ClarityConnection, TransactionConnection}, - contexts::AssetMap, - costs::{ExecutionCost, LimitedCostTracker}, -}; mod assets; mod contracts; @@ -50,41 +32,124 @@ mod datamaps; mod defines; mod principals; mod sequences; +#[cfg(test)] mod simple_apply_eval; mod traits; -pub fn with_memory_environment(f: F, epoch: StacksEpochId, top_level: bool) -where - F: FnOnce(&mut OwnedEnvironment) -> (), -{ - let mut marf_kv = MemoryBackingStore::new(); +macro_rules! 
epochs_template { + ($($epoch:ident,)*) => { + #[template] + #[export] + #[rstest] + $( + #[case::$epoch(StacksEpochId::$epoch)] + )* + pub fn test_epochs(#[case] epoch: StacksEpochId) {} - let mut owned_env = OwnedEnvironment::new(marf_kv.as_clarity_db(), epoch); - // start an initial transaction. - if !top_level { - owned_env.begin(); + #[test] + fn epochs_covered() { + let epoch = StacksEpochId::latest(); + match epoch { + // don't test Epoch-1.0 + StacksEpochId::Epoch10 => (), + // this will lead to a compile time failure if an epoch is left out + // of the epochs_template! macro list + $(StacksEpochId::$epoch)|* => (), + } + } } +} + +macro_rules! clarity_template { + ($(($epoch:ident, $clarity:ident),)*) => { + #[template] + #[export] + #[rstest] + $( + #[case::$epoch(ClarityVersion::$clarity, StacksEpochId::$epoch)] + )* + pub fn test_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - f(&mut owned_env) + #[test] + fn epoch_clarity_pairs_covered() { + let epoch = StacksEpochId::latest(); + let clarity = ClarityVersion::latest(); + match (epoch, clarity) { + // don't test Epoch-1.0 + (StacksEpochId::Epoch10, _) => (), + // don't test these pairs, because they aren't supported: + (StacksEpochId::Epoch20, ClarityVersion::Clarity2) => (), + (StacksEpochId::Epoch2_05, ClarityVersion::Clarity2) => (), + // this will lead to a compile time failure if a pair is left out + // of the clarity_template! macro list + $((StacksEpochId::$epoch, ClarityVersion::$clarity))|* => (), + } + } + } } -pub fn with_versioned_memory_environment( - f: F, - epoch: StacksEpochId, - version: ClarityVersion, - top_level: bool, -) where - F: FnOnce(&mut OwnedEnvironment, ClarityVersion) -> (), -{ - let mut marf_kv = MemoryBackingStore::new(); - - let mut owned_env = OwnedEnvironment::new(marf_kv.as_clarity_db(), epoch); - // start an initial transaction. 
- if !top_level { +// Define two rstest templates for Clarity tests: `test_epochs` and `test_clarity_versions` +// these templates test all epochs (except 1.0) and all valid epoch/clarity-version pairs. +// +// The macro definitions ensure that we get compile time errors in testing if there is a +// non-covered case in the rstest template. This *could* have been written as a derive macro, +// but then it would need to be defined in the `stacks-common` library (where it would have to +// get a `testing` feature flag). This seems less obtuse. +epochs_template! { + Epoch20, + Epoch2_05, + Epoch21, + Epoch22, + Epoch23, + Epoch24, +} + +clarity_template! { + (Epoch20, Clarity1), + (Epoch2_05, Clarity1), + (Epoch21, Clarity1), + (Epoch21, Clarity2), + (Epoch22, Clarity1), + (Epoch22, Clarity2), + (Epoch23, Clarity1), + (Epoch23, Clarity2), + (Epoch24, Clarity1), + (Epoch24, Clarity2), +} + +#[cfg(test)] +impl Value { + pub fn list_from(list_data: Vec) -> Result { + Value::cons_list_unsanitized(list_data) + } +} + +#[fixture] +pub fn env_factory() -> MemoryEnvironmentGenerator { + MemoryEnvironmentGenerator(MemoryBackingStore::new()) +} + +#[fixture] +pub fn tl_env_factory() -> TopLevelMemoryEnvironmentGenerator { + TopLevelMemoryEnvironmentGenerator(MemoryBackingStore::new()) +} + +pub struct MemoryEnvironmentGenerator(MemoryBackingStore); +impl MemoryEnvironmentGenerator { + fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { + let mut owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + // start an initial transaction. 
owned_env.begin(); + owned_env } +} - f(&mut owned_env, version) +pub struct TopLevelMemoryEnvironmentGenerator(MemoryBackingStore); +impl TopLevelMemoryEnvironmentGenerator { + fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment { + let owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch); + owned_env + } } /// Determine whether or not to use the testnet or mainnet chain ID, given whether or not the diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs index 912987f138..9c91e6b19a 100644 --- a/clarity/src/vm/tests/principals.rs +++ b/clarity/src/vm/tests/principals.rs @@ -1,29 +1,22 @@ +use crate::vm::ast::ASTRules; +use crate::vm::execute_with_parameters; +use crate::vm::types::TypeSignature::PrincipalType; +use crate::vm::types::{ASCIIData, BuffData, CharType, SequenceData, Value}; +use crate::vm::ClarityVersion; use std::collections::HashMap; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::hex_bytes; -use crate::vm::ast::ASTRules; -use crate::vm::callables::{DefineType, DefinedFunction}; -use crate::vm::costs::LimitedCostTracker; -use crate::vm::database::MemoryBackingStore; -use crate::vm::errors::{ - CheckErrors, Error, InterpreterError, InterpreterResult as Result, RuntimeErrorType, -}; -use crate::vm::eval; -use crate::vm::execute; -use crate::vm::execute_with_parameters; +use crate::vm::errors::CheckErrors; use crate::vm::functions::principals::PrincipalConstructErrorCode; use crate::vm::types::BufferLength; use crate::vm::types::SequenceSubtype::{BufferType, StringType}; use crate::vm::types::StringSubtype::ASCII; -use crate::vm::types::TypeSignature::{PrincipalType, SequenceType}; -use crate::vm::types::{ASCIIData, BuffData, CharType, SequenceData, Value}; use crate::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TupleData, TypeSignature, BUFF_1, BUFF_20, }; -use crate::vm::ClarityVersion; use crate::vm::{ CallStack, 
ContractContext, Environment, GlobalContext, LocalContext, SymbolicExpression, }; diff --git a/clarity/src/vm/tests/sequences.rs b/clarity/src/vm/tests/sequences.rs index f3ef58f7e5..6864223fd8 100644 --- a/clarity/src/vm/tests/sequences.rs +++ b/clarity/src/vm/tests/sequences.rs @@ -14,32 +14,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::vm::types::signatures::SequenceSubtype; +use crate::vm::types::TypeSignature::{BoolType, IntType, SequenceType, UIntType}; +use crate::vm::types::{StringSubtype, StringUTF8Length, TypeSignature, Value}; use std::convert::{TryFrom, TryInto}; #[cfg(test)] use rstest::rstest; -#[cfg(test)] use rstest_reuse::{self, *}; use stacks_common::types::StacksEpochId; -use crate::vm::analysis::errors::CheckError; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; -use crate::vm::types::signatures::SequenceSubtype::{BufferType, ListType, StringType}; +use crate::vm::tests::test_clarity_versions; +use crate::vm::types::signatures::ListTypeData; +use crate::vm::types::signatures::SequenceSubtype::{BufferType, StringType}; use crate::vm::types::signatures::StringSubtype::ASCII; -use crate::vm::types::signatures::{ListTypeData, SequenceSubtype}; use crate::vm::types::BufferLength; -use crate::vm::types::CharType::UTF8; -use crate::vm::types::TypeSignature::{BoolType, IntType, SequenceType, UIntType}; -use crate::vm::types::{StringSubtype, StringUTF8Length, TypeSignature, Value}; use crate::vm::{execute, execute_v2, ClarityVersion}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_sequences(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - #[test] fn test_simple_list_admission() { let defines = "(define-private (square (x int)) (* x x)) @@ -1167,7 +1159,7 @@ fn 
test_buff_len() { assert_eq!(expected, execute(test2).unwrap().unwrap()); } -#[apply(test_clarity_versions_sequences)] +#[apply(test_clarity_versions)] fn test_construct_bad_list(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let test1 = "(list 1 2 3 true)"; assert_eq!( diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index a650043c82..fc97e2880d 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -14,11 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; - -#[cfg(test)] use rstest::rstest; -#[cfg(test)] use rstest_reuse::{self, *}; use stacks_common::address::c32; use stacks_common::address::AddressHashMode; @@ -31,37 +27,25 @@ use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::{hex_bytes, to_hex}; -#[cfg(test)] use crate::vm::ast::parse; use crate::vm::ast::ASTRules; use crate::vm::callables::DefinedFunction; use crate::vm::contexts::OwnedEnvironment; use crate::vm::costs::LimitedCostTracker; +use crate::vm::database::MemoryBackingStore; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType, ShortReturnType}; use crate::vm::tests::execute; +use crate::vm::tests::test_clarity_versions; use crate::vm::types::signatures::*; +use crate::vm::types::StacksAddressExtensions; use crate::vm::types::{ASCIIData, BuffData, CharType, QualifiedContractIdentifier, TypeSignature}; -use crate::vm::types::{PrincipalData, ResponseData, SequenceData, SequenceSubtype, StringSubtype}; +use crate::vm::types::{PrincipalData, SequenceData}; use crate::vm::ClarityVersion; use crate::vm::{ eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_parameters, }; use crate::vm::{CallStack, ContractContext, Environment, GlobalContext, LocalContext, Value}; -#[template] -#[rstest] 
-#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_simple_apply_eval( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -use crate::vm::database::MemoryBackingStore; -use crate::vm::types::StacksAddressExtensions; - #[test] fn test_doubly_defined_persisted_vars() { let tests = [ @@ -77,7 +61,7 @@ fn test_doubly_defined_persisted_vars() { } } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_let(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { /* test program: @@ -647,7 +631,7 @@ fn test_principal_equality() { .for_each(|(program, expectation)| assert_eq!(expectation.clone(), execute(program))); } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_if_functions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // // test program: @@ -1091,7 +1075,7 @@ fn test_sequence_comparisons_mismatched_types() { }); } -#[apply(test_clarity_versions_simple_apply_eval)] +#[apply(test_clarity_versions)] fn test_simple_arithmetic_errors(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let tests = [ "(>= 1)", diff --git a/clarity/src/vm/tests/traits.rs b/clarity/src/vm/tests/traits.rs index 38f41966b2..7cc22b7a07 100644 --- a/clarity/src/vm/tests/traits.rs +++ b/clarity/src/vm/tests/traits.rs @@ -18,82 +18,30 @@ use std::convert::TryInto; use stacks_common::types::StacksEpochId; -use crate::vm::analysis::errors::CheckError; use crate::vm::ast::ASTRules; use crate::vm::contexts::{Environment, GlobalContext, OwnedEnvironment}; use crate::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use crate::vm::execute as vm_execute; -use crate::vm::tests::{ - execute, symbols_from_values, with_memory_environment, with_versioned_memory_environment, -}; +use 
crate::vm::tests::{execute, symbols_from_values}; use crate::vm::types::{ PrincipalData, QualifiedContractIdentifier, ResponseData, TypeSignature, Value, }; + +use crate::vm::tests::env_factory; +use crate::vm::tests::test_clarity_versions; +use crate::vm::tests::test_epochs; use crate::vm::version::ClarityVersion; use crate::vm::ContractContext; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_epoch_clarity_versions(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} - -#[apply(test_epoch_clarity_versions)] -fn test_trait_basics(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { - let to_test = [ - test_dynamic_dispatch_pass_trait_nested_in_let, - test_dynamic_dispatch_pass_trait, - test_dynamic_dispatch_intra_contract_call, - test_dynamic_dispatch_by_defining_trait, - test_dynamic_dispatch_by_implementing_imported_trait, - test_dynamic_dispatch_by_importing_trait, - test_dynamic_dispatch_including_nested_trait, - test_dynamic_dispatch_mismatched_args, - test_dynamic_dispatch_mismatched_returned, - test_reentrant_dynamic_dispatch, - test_readwrite_dynamic_dispatch, - test_readwrite_violation_dynamic_dispatch, - test_bad_call_with_trait, - test_good_call_with_trait, - test_good_call_2_with_trait, - test_contract_of_value, - test_contract_of_no_impl, - test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs, - test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions, - test_return_trait_with_contract_of, - test_return_trait_with_contract_of_wrapped_in_begin, - test_return_trait_with_contract_of_wrapped_in_let, - ]; - for test in to_test.iter() { - with_versioned_memory_environment(test, epoch, version, false); - } -} - -#[test] -fn test_clarity2() { - let to_test = [ - test_pass_principal_literal_to_trait, - test_pass_trait_to_subtrait, - 
test_embedded_trait, - test_pass_embedded_trait_to_subtrait_optional, - test_pass_embedded_trait_to_subtrait_ok, - test_pass_embedded_trait_to_subtrait_err, - test_pass_embedded_trait_to_subtrait_list, - test_pass_embedded_trait_to_subtrait_list_option, - test_pass_embedded_trait_to_subtrait_option_list, - test_let_trait, - test_let3_trait, - ]; - for test in to_test.iter() { - with_memory_environment(test, StacksEpochId::latest(), false); - } -} +use super::MemoryEnvironmentGenerator; +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_defining_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -142,10 +90,13 @@ fn test_dynamic_dispatch_by_defining_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_trait_nested_in_let( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -197,7 +148,13 @@ fn test_dynamic_dispatch_pass_trait_nested_in_let( } } -fn test_dynamic_dispatch_pass_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_dynamic_dispatch_pass_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -248,10 +205,13 @@ fn test_dynamic_dispatch_pass_trait(owned_env: &mut OwnedEnvironment, version: C } } 
+#[apply(test_clarity_versions)] fn test_dynamic_dispatch_intra_contract_call( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -303,10 +263,13 @@ fn test_dynamic_dispatch_intra_contract_call( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_implementing_imported_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -363,10 +326,13 @@ fn test_dynamic_dispatch_by_implementing_imported_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) (get-2 (uint) (response uint uint))))"; @@ -425,10 +391,13 @@ fn test_dynamic_dispatch_by_implementing_imported_trait_mul_funcs( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_by_importing_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) 
@@ -484,10 +453,13 @@ fn test_dynamic_dispatch_by_importing_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_including_nested_trait( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_nested_trait = "(define-trait trait-a ( (get-a (uint) (response uint uint))))"; let contract_defining_trait = "(use-trait trait-a .contract-defining-nested-trait.trait-a) @@ -565,10 +537,13 @@ fn test_dynamic_dispatch_including_nested_trait( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_args( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -619,10 +594,13 @@ fn test_dynamic_dispatch_mismatched_args( } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_mismatched_returned( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -673,7 +651,13 @@ fn test_dynamic_dispatch_mismatched_returned( } } -fn test_reentrant_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_reentrant_dynamic_dispatch( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ 
-727,7 +711,13 @@ fn test_reentrant_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: Cl } } -fn test_readwrite_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_readwrite_dynamic_dispatch( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-read-only (wrapped-get-1 (contract )) @@ -778,10 +768,13 @@ fn test_readwrite_dynamic_dispatch(owned_env: &mut OwnedEnvironment, version: Cl } } +#[apply(test_clarity_versions)] fn test_readwrite_violation_dynamic_dispatch( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-read-only (wrapped-get-1 (contract )) @@ -832,7 +825,13 @@ fn test_readwrite_violation_dynamic_dispatch( } } -fn test_bad_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_bad_call_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); // This set of contracts should be working in this context, // the analysis is not being performed. 
let contract_defining_trait = "(define-trait trait-1 ( @@ -897,7 +896,13 @@ fn test_bad_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVe } } -fn test_good_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_good_call_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -959,7 +964,13 @@ fn test_good_call_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityV } } -fn test_good_call_2_with_trait(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_good_call_2_with_trait( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1026,10 +1037,13 @@ fn test_good_call_2_with_trait(owned_env: &mut OwnedEnvironment, version: Clarit } } +#[apply(test_clarity_versions)] fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functions( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .contract-defining-trait.trait-1) @@ -1087,7 +1101,13 @@ fn test_dynamic_dispatch_pass_literal_principal_as_trait_in_user_defined_functio } } -fn test_contract_of_value(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { 
+#[apply(test_clarity_versions)] +fn test_contract_of_value( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1146,7 +1166,13 @@ fn test_contract_of_value(owned_env: &mut OwnedEnvironment, version: ClarityVers } } -fn test_contract_of_no_impl(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_contract_of_no_impl( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let contract_defining_trait = "(define-trait trait-1 ( (get-1 (uint) (response uint uint))))"; let dispatching_contract = "(use-trait trait-1 .defun.trait-1) @@ -1207,10 +1233,13 @@ fn test_contract_of_no_impl(owned_env: &mut OwnedEnvironment, version: ClarityVe } } +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_begin( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1261,10 +1290,13 @@ fn test_return_trait_with_contract_of_wrapped_in_begin( } } +#[apply(test_clarity_versions)] fn test_return_trait_with_contract_of_wrapped_in_let( - owned_env: &mut OwnedEnvironment, version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, ) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1315,7 +1347,13 @@ fn 
test_return_trait_with_contract_of_wrapped_in_let( } } -fn test_return_trait_with_contract_of(owned_env: &mut OwnedEnvironment, version: ClarityVersion) { +#[apply(test_clarity_versions)] +fn test_return_trait_with_contract_of( + version: ClarityVersion, + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)))) (define-public (wrapped-get-1 (contract )) @@ -1364,7 +1402,12 @@ fn test_return_trait_with_contract_of(owned_env: &mut OwnedEnvironment, version: } } -fn test_pass_trait_to_subtrait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_trait_to_subtrait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1424,7 +1467,12 @@ fn test_pass_trait_to_subtrait(owned_env: &mut OwnedEnvironment) { } } -fn test_embedded_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_embedded_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ -1482,7 +1530,15 @@ fn test_embedded_trait(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_optional(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_optional( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1546,7 
+1602,15 @@ fn test_pass_embedded_trait_to_subtrait_optional(owned_env: &mut OwnedEnvironmen } } -fn test_pass_embedded_trait_to_subtrait_ok(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_ok( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1610,7 +1674,15 @@ fn test_pass_embedded_trait_to_subtrait_ok(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_err(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_err( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1674,7 +1746,15 @@ fn test_pass_embedded_trait_to_subtrait_err(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_list(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_list( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1738,7 +1818,15 @@ fn test_pass_embedded_trait_to_subtrait_list(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_embedded_trait_to_subtrait_list_option(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_list_option( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = 
env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1805,7 +1893,15 @@ fn test_pass_embedded_trait_to_subtrait_list_option(owned_env: &mut OwnedEnviron } } -fn test_pass_embedded_trait_to_subtrait_option_list(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_embedded_trait_to_subtrait_option_list( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) @@ -1872,7 +1968,12 @@ fn test_pass_embedded_trait_to_subtrait_option_list(owned_env: &mut OwnedEnviron } } -fn test_let_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_let_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ -1928,7 +2029,12 @@ fn test_let_trait(owned_env: &mut OwnedEnvironment) { } } -fn test_let3_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_let3_trait(epoch: StacksEpochId, mut env_factory: MemoryEnvironmentGenerator) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = "(define-trait trait-1 ( (echo (uint) (response uint uint)) )) @@ -1988,7 +2094,15 @@ fn test_let3_trait(owned_env: &mut OwnedEnvironment) { } } -fn test_pass_principal_literal_to_trait(owned_env: &mut OwnedEnvironment) { +#[apply(test_epochs)] +fn test_pass_principal_literal_to_trait( + epoch: StacksEpochId, + mut env_factory: MemoryEnvironmentGenerator, +) { + if epoch < StacksEpochId::Epoch21 { + return; + } + let mut owned_env = env_factory.get_env(epoch); let dispatching_contract = 
"(define-trait trait-1 ( (get-1 (uint) (response uint uint)) )) diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index e021364a5c..f2009b4612 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -237,6 +237,9 @@ pub enum Value { Optional(OptionalData), Response(ResponseData), CallableContract(CallableData), + // NOTE: any new value variants which may contain _other values_ (i.e., + // compound values like `Optional`, `Tuple`, `Response`, or `Sequence(List)`) + // must be handled in the value sanitization routine! } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] @@ -459,12 +462,11 @@ impl SequenceData { Ok(()) } - pub fn append(&mut self, epoch: &StacksEpochId, other_seq: &mut SequenceData) -> Result<()> { + pub fn concat(&mut self, epoch: &StacksEpochId, other_seq: SequenceData) -> Result<()> { match (self, other_seq) { - ( - SequenceData::List(ref mut inner_data), - SequenceData::List(ref mut other_inner_data), - ) => inner_data.append(epoch, other_inner_data), + (SequenceData::List(ref mut inner_data), SequenceData::List(other_inner_data)) => { + inner_data.append(epoch, other_inner_data) + } ( SequenceData::Buffer(ref mut inner_data), SequenceData::Buffer(ref mut other_inner_data), @@ -482,7 +484,12 @@ impl SequenceData { Ok(()) } - pub fn slice(self, left_position: usize, right_position: usize) -> Result { + pub fn slice( + self, + epoch: &StacksEpochId, + left_position: usize, + right_position: usize, + ) -> Result { let empty_seq = left_position == right_position; let result = match self { @@ -500,7 +507,7 @@ impl SequenceData { } else { data.data[left_position..right_position].to_vec() }; - Value::list_from(data) + Value::cons_list(data, epoch) } SequenceData::String(CharType::ASCII(data)) => { let data = if empty_seq { @@ -906,7 +913,15 @@ impl Value { }))) } - pub fn list_from(list_data: Vec) -> Result { + pub fn cons_list_unsanitized(list_data: Vec) -> Result { + let type_sig = 
TypeSignature::construct_parent_list_type(&list_data)?; + Ok(Value::Sequence(SequenceData::List(ListData { + data: list_data, + type_signature: type_sig, + }))) + } + + pub fn cons_list(list_data: Vec, epoch: &StacksEpochId) -> Result { // Constructors for TypeSignature ensure that the size of the Value cannot // be greater than MAX_VALUE_SIZE (they error on such constructions) // Aaron: at this point, we've _already_ allocated memory for this type. @@ -914,6 +929,14 @@ impl Value { // this is a problem _if_ the static analyzer cannot already prevent // this case. This applies to all the constructor size checks. let type_sig = TypeSignature::construct_parent_list_type(&list_data)?; + let list_data_opt: Option<_> = list_data + .into_iter() + .map(|item| { + Value::sanitize_value(epoch, type_sig.get_list_item_type(), item) + .map(|(value, _did_sanitize)| value) + }) + .collect(); + let list_data = list_data_opt.ok_or_else(|| CheckErrors::ListTypesMustMatch)?; Ok(Value::Sequence(SequenceData::List(ListData { data: list_data, type_signature: type_sig, @@ -1184,13 +1207,18 @@ impl ListData { self.data.len().try_into().unwrap() } - fn append(&mut self, epoch: &StacksEpochId, other_seq: &mut ListData) -> Result<()> { + fn append(&mut self, epoch: &StacksEpochId, other_seq: ListData) -> Result<()> { let entry_type_a = self.type_signature.get_list_item_type(); let entry_type_b = other_seq.type_signature.get_list_item_type(); let entry_type = TypeSignature::factor_out_no_type(epoch, &entry_type_a, &entry_type_b)?; let max_len = self.type_signature.get_max_len() + other_seq.type_signature.get_max_len(); + for item in other_seq.data.into_iter() { + let (item, _) = Value::sanitize_value(epoch, &entry_type, item) + .ok_or_else(|| CheckErrors::ListTypesMustMatch)?; + self.data.push(item); + } + self.type_signature = ListTypeData::new_list(entry_type, max_len)?; - self.data.append(&mut other_seq.data); Ok(()) } } @@ -1465,6 +1493,7 @@ impl TupleData { Ok(t) } + /// Return the 
number of fields in this tuple value pub fn len(&self) -> u64 { self.data_map.len() as u64 } diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index b35d3ec453..df6ac58100 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -39,9 +39,11 @@ use crate::vm::types::{ BufferLength, CallableData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, SequenceSubtype, StandardPrincipalData, StringSubtype, StringUTF8Length, TupleData, TypeSignature, Value, BOUND_VALUE_SERIALIZATION_BYTES, - MAX_VALUE_SIZE, + MAX_TYPE_DEPTH, MAX_VALUE_SIZE, }; +use super::{ListTypeData, TupleTypeSignature}; + /// Errors that may occur in serialization or deserialization /// If deserialization failed because the described type is a bad type and /// a CheckError is thrown, it gets wrapped in BadTypeError. @@ -61,6 +63,20 @@ lazy_static! { pub static ref NONE_SERIALIZATION_LEN: u64 = Value::none().serialize_to_vec().len() as u64; } +/// Deserialization uses a specific epoch for passing to the type signature checks +/// The reason this is pinned to Epoch21 is so that values stored before epoch-2.4 +/// can still be read from the database. +const DESERIALIZATION_TYPE_CHECK_EPOCH: StacksEpochId = StacksEpochId::Epoch21; + +/// Pre-sanitization values could end up being larger than the deserializer originally +/// supported, so we increase the bound to a higher level limit imposed by the cost checker. +const SANITIZATION_READ_BOUND: u64 = 15_000_000; + +/// Before epoch-2.4, this is the deserialization depth limit. +/// After epoch-2.4, with type sanitization support, the full +/// clarity depth limit is supported. +const UNSANITIZED_DEPTH_CHECK: usize = 16; + impl std::fmt::Display for SerializationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { @@ -295,6 +311,84 @@ macro_rules! 
check_match { }; } +/// `DeserializeStackItem` objects are used by the deserializer to indicate +/// how the deserialization loop's current object is to be handled once it is +/// deserialized: i.e., is the object the top-level object for the serialization +/// or is it an entry in a composite type (e.g., a list or tuple)? +enum DeserializeStackItem { + List { + items: Vec, + expected_len: u32, + expected_type: Option, + }, + Tuple { + items: Vec<(ClarityName, Value)>, + expected_len: u64, + processed_entries: u64, + expected_type: Option, + next_name: ClarityName, + next_sanitize: bool, + }, + OptionSome { + inner_expected_type: Option, + }, + ResponseOk { + inner_expected_type: Option, + }, + ResponseErr { + inner_expected_type: Option, + }, + TopLevel { + expected_type: Option, + }, +} + +impl DeserializeStackItem { + /// What is the expected type for the child of this deserialization stack item? + /// + /// Returns `None` if this stack item either doesn't have an expected type, or the + /// next child is going to be sanitized/elided. + fn next_expected_type(&self) -> Result, SerializationError> { + match self { + DeserializeStackItem::List { expected_type, .. } => Ok(expected_type + .as_ref() + .map(|lt| lt.get_list_item_type()) + .cloned()), + DeserializeStackItem::Tuple { + expected_type, + next_name, + next_sanitize, + .. + } => match expected_type { + None => Ok(None), + Some(some_tuple) => { + // if we're sanitizing this tuple, and the `next_name` field is to be + // removed, don't return an expected type. 
+ if *next_sanitize { + return Ok(None); + } + let field_type = some_tuple.field_type(&next_name).ok_or_else(|| { + SerializationError::DeserializeExpected(TypeSignature::TupleType( + some_tuple.clone(), + )) + })?; + Ok(Some(field_type.clone())) + } + }, + DeserializeStackItem::OptionSome { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::ResponseOk { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::ResponseErr { + inner_expected_type, + } => Ok(inner_expected_type.clone()), + DeserializeStackItem::TopLevel { expected_type } => Ok(expected_type.clone()), + } + } +} + impl TypeSignature { /// Return the maximum length of the consensus serialization of a /// Clarity value of this type. The returned length *may* not fit @@ -427,18 +521,28 @@ impl Value { pub fn deserialize_read( r: &mut R, expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result { - Self::deserialize_read_count(r, expected_type).map(|(value, _)| value) + Self::deserialize_read_count(r, expected_type, sanitize).map(|(value, _)| value) } /// Deserialize just like `deserialize_read` but also - /// return the bytes read + /// return the bytes read. + /// If `sanitize` argument is set to true and `expected_type` is supplied, + /// this method will remove any extraneous tuple fields which may have been + /// allowed by `least_super_type`. 
pub fn deserialize_read_count( r: &mut R, expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result<(Value, u64), SerializationError> { - let mut bound_reader = BoundReader::from_reader(r, BOUND_VALUE_SERIALIZATION_BYTES as u64); - let value = Value::inner_deserialize_read(&mut bound_reader, expected_type, 0)?; + let bound_value_serialization_bytes = if sanitize && expected_type.is_some() { + SANITIZATION_READ_BOUND + } else { + BOUND_VALUE_SERIALIZATION_BYTES as u64 + }; + let mut bound_reader = BoundReader::from_reader(r, bound_value_serialization_bytes); + let value = Value::inner_deserialize_read(&mut bound_reader, expected_type, sanitize)?; let bytes_read = bound_reader.num_read(); if let Some(expected_type) = expected_type { let expect_size = match expected_type.max_serialized_size() { @@ -452,13 +556,15 @@ impl Value { } }; - assert!( - expect_size as u64 >= bytes_read, - "Deserialized more bytes than expected size during deserialization. Expected size = {}, bytes read = {}, type = {}", - expect_size, - bytes_read, - expected_type, - ); + if expect_size as u64 > bytes_read { + // this can happen due to sanitization, so its no longer indicative of a *problem* with the node. + debug!( + "Deserialized more bytes than expected size during deserialization. 
Expected size = {}, bytes read = {}, type = {}", + expect_size, + bytes_read, + expected_type, + ); + } } Ok((value, bytes_read)) @@ -466,261 +572,451 @@ impl Value { fn inner_deserialize_read( r: &mut R, - expected_type: Option<&TypeSignature>, - depth: u8, + top_expected_type: Option<&TypeSignature>, + sanitize: bool, ) -> Result { use super::PrincipalData::*; use super::Value::*; - if depth >= 16 { - return Err(CheckErrors::TypeSignatureTooDeep.into()); - } - - let mut header = [0]; - r.read_exact(&mut header)?; - - let prefix = TypePrefix::from_u8(header[0]).ok_or_else(|| "Bad type prefix")?; - - match prefix { - TypePrefix::Int => { - check_match!(expected_type, TypeSignature::IntType)?; - let mut buffer = [0; 16]; - r.read_exact(&mut buffer)?; - Ok(Int(i128::from_be_bytes(buffer))) - } - TypePrefix::UInt => { - check_match!(expected_type, TypeSignature::UIntType)?; - let mut buffer = [0; 16]; - r.read_exact(&mut buffer)?; - Ok(UInt(u128::from_be_bytes(buffer))) - } - TypePrefix::Buffer => { - let mut buffer_len = [0; 4]; - r.read_exact(&mut buffer_len)?; - let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - - if let Some(x) = expected_type { - let passed_test = match x { - TypeSignature::SequenceType(SequenceSubtype::BufferType(expected_len)) => { - u32::from(&buffer_len) <= u32::from(expected_len) - } - _ => false, - }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); - } - } - - let mut data = vec![0; u32::from(buffer_len) as usize]; - - r.read_exact(&mut data[..])?; + let mut stack = vec![DeserializeStackItem::TopLevel { + expected_type: top_expected_type.cloned(), + }]; - Value::buff_from(data).map_err(|_| "Bad buffer".into()) - } - TypePrefix::BoolTrue => { - check_match!(expected_type, TypeSignature::BoolType)?; - Ok(Bool(true)) - } - TypePrefix::BoolFalse => { - check_match!(expected_type, TypeSignature::BoolType)?; - Ok(Bool(false)) - } - TypePrefix::PrincipalStandard => { - 
check_match!(expected_type, TypeSignature::PrincipalType)?; - StandardPrincipalData::deserialize_read(r).map(Value::from) - } - TypePrefix::PrincipalContract => { - check_match!(expected_type, TypeSignature::PrincipalType)?; - let issuer = StandardPrincipalData::deserialize_read(r)?; - let name = ContractName::deserialize_read(r)?; - Ok(Value::from(QualifiedContractIdentifier { issuer, name })) + while !stack.is_empty() { + let depth_check = if sanitize { + MAX_TYPE_DEPTH as usize + } else { + UNSANITIZED_DEPTH_CHECK + }; + if stack.len() > depth_check { + return Err(CheckErrors::TypeSignatureTooDeep.into()); } - TypePrefix::ResponseOk | TypePrefix::ResponseErr => { - let committed = prefix == TypePrefix::ResponseOk; - - let expect_contained_type = match expected_type { - None => None, - Some(x) => { - let contained_type = match (committed, x) { - (true, TypeSignature::ResponseType(types)) => Ok(&types.0), - (false, TypeSignature::ResponseType(types)) => Ok(&types.1), - _ => Err(SerializationError::DeserializeExpected(x.clone())), - }?; - Some(contained_type) - } - }; - let data = Value::inner_deserialize_read(r, expect_contained_type, depth + 1)?; - let value = if committed { - Value::okay(data) - } else { - Value::error(data) + let expected_type = stack + .last() + .expect("FATAL: stack.last() should always be some() because of loop condition") + .next_expected_type()?; + + let mut header = [0]; + r.read_exact(&mut header)?; + let prefix = TypePrefix::from_u8(header[0]).ok_or_else(|| "Bad type prefix")?; + + let item = match prefix { + TypePrefix::Int => { + check_match!(expected_type, TypeSignature::IntType)?; + let mut buffer = [0; 16]; + r.read_exact(&mut buffer)?; + Ok(Int(i128::from_be_bytes(buffer))) } - .map_err(|_x| "Value too large")?; - - Ok(value) - } - TypePrefix::OptionalNone => { - check_match!(expected_type, TypeSignature::OptionalType(_))?; - Ok(Value::none()) - } - TypePrefix::OptionalSome => { - let expect_contained_type = match expected_type { 
- None => None, - Some(x) => { - let contained_type = match x { - TypeSignature::OptionalType(some_type) => Ok(some_type.as_ref()), - _ => Err(SerializationError::DeserializeExpected(x.clone())), - }?; - Some(contained_type) + TypePrefix::UInt => { + check_match!(expected_type, TypeSignature::UIntType)?; + let mut buffer = [0; 16]; + r.read_exact(&mut buffer)?; + Ok(UInt(u128::from_be_bytes(buffer))) + } + TypePrefix::Buffer => { + let mut buffer_len = [0; 4]; + r.read_exact(&mut buffer_len)?; + let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; + + if let Some(x) = &expected_type { + let passed_test = match x { + TypeSignature::SequenceType(SequenceSubtype::BufferType( + expected_len, + )) => u32::from(&buffer_len) <= u32::from(expected_len), + _ => false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } } - }; - let value = Value::some(Value::inner_deserialize_read( - r, - expect_contained_type, - depth + 1, - )?) - .map_err(|_x| "Value too large")?; + let mut data = vec![0; u32::from(buffer_len) as usize]; - Ok(value) - } - TypePrefix::List => { - let mut len = [0; 4]; - r.read_exact(&mut len)?; - let len = u32::from_be_bytes(len); + r.read_exact(&mut data[..])?; - if len > MAX_VALUE_SIZE { - return Err("Illegal list type".into()); + Value::buff_from(data).map_err(|_| "Bad buffer".into()) } - - let (list_type, entry_type) = match expected_type { - None => (None, None), - Some(TypeSignature::SequenceType(SequenceSubtype::ListType(list_type))) => { - if len > list_type.get_max_len() { - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap().clone(), - )); - } - (Some(list_type), Some(list_type.get_list_item_type())) - } - Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), - }; - - let mut items = Vec::with_capacity(len as usize); - for _i in 0..len { - items.push(Value::inner_deserialize_read(r, entry_type, depth + 1)?); + TypePrefix::BoolTrue => { + 
check_match!(expected_type, TypeSignature::BoolType)?; + Ok(Bool(true)) } - - if let Some(list_type) = list_type { - Value::list_with_type(&StacksEpochId::Epoch21, items, list_type.clone()) - .map_err(|_| "Illegal list type".into()) - } else { - Value::list_from(items).map_err(|_| "Illegal list type".into()) + TypePrefix::BoolFalse => { + check_match!(expected_type, TypeSignature::BoolType)?; + Ok(Bool(false)) } - } - TypePrefix::Tuple => { - let mut len = [0; 4]; - r.read_exact(&mut len)?; - let len = u32::from_be_bytes(len); - - if len > MAX_VALUE_SIZE { - return Err(SerializationError::DeserializationError( - "Illegal tuple type".to_string(), - )); + TypePrefix::PrincipalStandard => { + check_match!(expected_type, TypeSignature::PrincipalType)?; + StandardPrincipalData::deserialize_read(r).map(Value::from) + } + TypePrefix::PrincipalContract => { + check_match!(expected_type, TypeSignature::PrincipalType)?; + let issuer = StandardPrincipalData::deserialize_read(r)?; + let name = ContractName::deserialize_read(r)?; + Ok(Value::from(QualifiedContractIdentifier { issuer, name })) } + TypePrefix::ResponseOk | TypePrefix::ResponseErr => { + let committed = prefix == TypePrefix::ResponseOk; - let tuple_type = match expected_type { - None => None, - Some(TypeSignature::TupleType(tuple_type)) => { - if len as u64 != tuple_type.len() { - return Err(SerializationError::DeserializeExpected( - expected_type.unwrap().clone(), - )); + let expect_contained_type = match &expected_type { + None => None, + Some(x) => { + let contained_type = match (committed, x) { + (true, TypeSignature::ResponseType(types)) => Ok(&types.0), + (false, TypeSignature::ResponseType(types)) => Ok(&types.1), + _ => Err(SerializationError::DeserializeExpected(x.clone())), + }?; + Some(contained_type) } - Some(tuple_type) - } - Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), - }; + }; - let mut items = Vec::with_capacity(len as usize); - for _i in 0..len { - let key = 
ClarityName::deserialize_read(r)?; + let stack_item = if committed { + DeserializeStackItem::ResponseOk { + inner_expected_type: expect_contained_type.cloned(), + } + } else { + DeserializeStackItem::ResponseErr { + inner_expected_type: expect_contained_type.cloned(), + } + }; - let expected_field_type = match tuple_type { + stack.push(stack_item); + continue; + } + TypePrefix::OptionalNone => { + check_match!(expected_type, TypeSignature::OptionalType(_))?; + Ok(Value::none()) + } + TypePrefix::OptionalSome => { + let expect_contained_type = match &expected_type { None => None, - Some(some_tuple) => Some(some_tuple.field_type(&key).ok_or_else(|| { - SerializationError::DeserializeExpected(expected_type.unwrap().clone()) - })?), + Some(x) => { + let contained_type = match x { + TypeSignature::OptionalType(some_type) => Ok(some_type.as_ref()), + _ => Err(SerializationError::DeserializeExpected(x.clone())), + }?; + Some(contained_type) + } }; - let value = Value::inner_deserialize_read(r, expected_field_type, depth + 1)?; - items.push((key, value)) - } + let stack_item = DeserializeStackItem::OptionSome { + inner_expected_type: expect_contained_type.cloned(), + }; - if let Some(tuple_type) = tuple_type { - TupleData::from_data_typed(&StacksEpochId::latest(), items, tuple_type) - .map_err(|_| "Illegal tuple type".into()) - .map(Value::from) - } else { - TupleData::from_data(items) - .map_err(|_| "Illegal tuple type".into()) - .map(Value::from) + stack.push(stack_item); + continue; } - } - TypePrefix::StringASCII => { - let mut buffer_len = [0; 4]; - r.read_exact(&mut buffer_len)?; - let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - - if let Some(x) = expected_type { - let passed_test = match x { - TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::ASCII(expected_len), - )) => u32::from(&buffer_len) <= u32::from(expected_len), - _ => false, + TypePrefix::List => { + let mut len = [0; 4]; + r.read_exact(&mut len)?; + let 
len = u32::from_be_bytes(len); + + if len > MAX_VALUE_SIZE { + return Err("Illegal list type".into()); + } + + let (list_type, _entry_type) = match expected_type.as_ref() { + None => (None, None), + Some(TypeSignature::SequenceType(SequenceSubtype::ListType(list_type))) => { + if len > list_type.get_max_len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + (Some(list_type), Some(list_type.get_list_item_type())) + } + Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); + + if len > 0 { + let items = Vec::with_capacity(len as usize); + let stack_item = DeserializeStackItem::List { + items, + expected_len: len, + expected_type: list_type.cloned(), + }; + + stack.push(stack_item); + continue; + } else { + let finished_list = if let Some(list_type) = list_type { + Value::list_with_type( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + vec![], + list_type.clone(), + ) + .map_err(|_| "Illegal list type")? + } else { + Value::cons_list_unsanitized(vec![]).map_err(|_| "Illegal list type")? 
+ }; + + Ok(finished_list) } } + TypePrefix::Tuple => { + let mut len = [0; 4]; + r.read_exact(&mut len)?; + let len = u32::from_be_bytes(len); + let expected_len = u64::from(len); + + if len > MAX_VALUE_SIZE { + return Err(SerializationError::DeserializationError( + "Illegal tuple type".to_string(), + )); + } - let mut data = vec![0; u32::from(buffer_len) as usize]; - - r.read_exact(&mut data[..])?; + let tuple_type = match expected_type.as_ref() { + None => None, + Some(TypeSignature::TupleType(tuple_type)) => { + if sanitize { + if u64::from(len) < tuple_type.len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + } else { + if len as u64 != tuple_type.len() { + return Err(SerializationError::DeserializeExpected( + expected_type.unwrap().clone(), + )); + } + } + Some(tuple_type) + } + Some(x) => return Err(SerializationError::DeserializeExpected(x.clone())), + }; - Value::string_ascii_from_bytes(data).map_err(|_| "Bad string".into()) - } - TypePrefix::StringUTF8 => { - let mut total_len = [0; 4]; - r.read_exact(&mut total_len)?; - let total_len = BufferLength::try_from(u32::from_be_bytes(total_len))?; + if len > 0 { + let items = Vec::with_capacity(expected_len as usize); + let first_key = ClarityName::deserialize_read(r)?; + // figure out if the next (key, value) pair for this + // tuple will be elided (or sanitized) from the tuple. 
+ // the logic here is that the next pair should be elided if: + // * `sanitize` parameter is true + // * `tuple_type` is some (i.e., there is an expected type for the + // tuple) + // * `tuple_type` does not contain an entry for `key` + let next_sanitize = sanitize + && tuple_type + .map(|tt| tt.field_type(&first_key).is_none()) + .unwrap_or(false); + let stack_item = DeserializeStackItem::Tuple { + items, + expected_len, + processed_entries: 0, + expected_type: tuple_type.cloned(), + next_name: first_key, + next_sanitize, + }; + + stack.push(stack_item); + continue; + } else { + let finished_tuple = if let Some(tuple_type) = tuple_type { + TupleData::from_data_typed( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + vec![], + &tuple_type, + ) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + } else { + TupleData::from_data(vec![]) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + }; + Ok(finished_tuple) + } + } + TypePrefix::StringASCII => { + let mut buffer_len = [0; 4]; + r.read_exact(&mut buffer_len)?; + let buffer_len = BufferLength::try_from(u32::from_be_bytes(buffer_len))?; - let mut data: Vec = vec![0; u32::from(total_len) as usize]; + if let Some(x) = &expected_type { + let passed_test = match x { + TypeSignature::SequenceType(SequenceSubtype::StringType( + StringSubtype::ASCII(expected_len), + )) => u32::from(&buffer_len) <= u32::from(expected_len), + _ => false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } + } - r.read_exact(&mut data[..])?; + let mut data = vec![0; u32::from(buffer_len) as usize]; - let value = Value::string_utf8_from_bytes(data) - .map_err(|_| "Illegal string_utf8 type".into()); + r.read_exact(&mut data[..])?; - if let Some(x) = expected_type { - let passed_test = match (x, &value) { - ( - TypeSignature::SequenceType(SequenceSubtype::StringType( - StringSubtype::UTF8(expected_len), - )), - Ok(Value::Sequence(SequenceData::String(CharType::UTF8(utf8)))), - ) => utf8.data.len() 
as u32 <= u32::from(expected_len), - _ => false, - }; - if !passed_test { - return Err(SerializationError::DeserializeExpected(x.clone())); + Value::string_ascii_from_bytes(data).map_err(|_| "Bad string".into()) + } + TypePrefix::StringUTF8 => { + let mut total_len = [0; 4]; + r.read_exact(&mut total_len)?; + let total_len = BufferLength::try_from(u32::from_be_bytes(total_len))?; + + let mut data: Vec = vec![0; u32::from(total_len) as usize]; + + r.read_exact(&mut data[..])?; + + let value = Value::string_utf8_from_bytes(data) + .map_err(|_| "Illegal string_utf8 type".into()); + + if let Some(x) = &expected_type { + let passed_test = match (x, &value) { + ( + TypeSignature::SequenceType(SequenceSubtype::StringType( + StringSubtype::UTF8(expected_len), + )), + Ok(Value::Sequence(SequenceData::String(CharType::UTF8(utf8)))), + ) => utf8.data.len() as u32 <= u32::from(expected_len), + _ => false, + }; + if !passed_test { + return Err(SerializationError::DeserializeExpected(x.clone())); + } } + + value } + }?; - value + let mut finished_item = Some(item); + while let Some(item) = finished_item.take() { + let stack_bottom = if let Some(stack_item) = stack.pop() { + stack_item + } else { + // this should be unreachable! + warn!( + "Deserializer reached unexpected path: item processed, but deserializer stack does not expect another value"; + "item" => %item, + ); + return Err("Deserializer processed item, but deserializer stack does not expect another value".into()); + }; + match stack_bottom { + DeserializeStackItem::TopLevel { .. } => return Ok(item), + DeserializeStackItem::List { + mut items, + expected_len, + expected_type, + } => { + items.push(item); + if expected_len as usize <= items.len() { + // list is finished! + let finished_list = if let Some(list_type) = expected_type { + Value::list_with_type( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + items, + list_type.clone(), + ) + .map_err(|_| "Illegal list type")? 
+ } else { + Value::cons_list_unsanitized(items) + .map_err(|_| "Illegal list type")? + }; + + finished_item.replace(finished_list); + } else { + // list is not finished, reinsert on stack + stack.push(DeserializeStackItem::List { + items, + expected_len, + expected_type, + }); + } + } + DeserializeStackItem::Tuple { + mut items, + expected_len, + expected_type, + next_name, + next_sanitize, + mut processed_entries, + } => { + let push_entry = if sanitize { + if let Some(_) = expected_type.as_ref() { + // if performing tuple sanitization, don't include a field + // if it was sanitized + !next_sanitize + } else { + // always push the entry if there's no type expectation + true + } + } else { + true + }; + let tuple_entry = (next_name, item); + if push_entry { + items.push(tuple_entry); + } + processed_entries += 1; + if expected_len <= processed_entries { + // tuple is finished! + let finished_tuple = if let Some(tuple_type) = expected_type { + if items.len() != tuple_type.len() as usize { + return Err(SerializationError::DeserializeExpected( + TypeSignature::TupleType(tuple_type), + )); + } + TupleData::from_data_typed( + &DESERIALIZATION_TYPE_CHECK_EPOCH, + items, + &tuple_type, + ) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + } else { + TupleData::from_data(items) + .map_err(|_| "Illegal tuple type") + .map(Value::from)? + }; + + finished_item.replace(finished_tuple); + } else { + // tuple is not finished, read the next key name and reinsert on stack + let key = ClarityName::deserialize_read(r)?; + // figure out if the next (key, value) pair for this + // tuple will be elided (or sanitized) from the tuple. 
+ // the logic here is that the next pair should be elided if: + // * `sanitize` parameter is true + // * `tuple_type` is some (i.e., there is an expected type for the + // tuple) + // * `tuple_type` does not contain an entry for `key` + let next_sanitize = sanitize + && expected_type + .as_ref() + .map(|tt| tt.field_type(&key).is_none()) + .unwrap_or(false); + stack.push(DeserializeStackItem::Tuple { + items, + expected_type, + expected_len, + next_name: key, + next_sanitize, + processed_entries, + }); + } + } + DeserializeStackItem::OptionSome { .. } => { + let finished_some = Value::some(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + DeserializeStackItem::ResponseOk { .. } => { + let finished_some = Value::okay(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + DeserializeStackItem::ResponseErr { .. } => { + let finished_some = Value::error(item).map_err(|_x| "Value too large")?; + finished_item.replace(finished_some); + } + }; } } + + Err(SerializationError::DeserializationError( + "Invalid data: stack ran out before finishing parsing".into(), + )) } pub fn serialize_write(&self, w: &mut W) -> std::io::Result<()> { @@ -790,8 +1086,9 @@ impl Value { pub fn try_deserialize_bytes( bytes: &Vec, expected: &TypeSignature, + sanitize: bool, ) -> Result { - Value::deserialize_read(&mut bytes.as_slice(), Some(expected)) + Value::deserialize_read(&mut bytes.as_slice(), Some(expected), sanitize) } /// This function attempts to deserialize a hex string into a Clarity Value. 
@@ -801,9 +1098,10 @@ impl Value { pub fn try_deserialize_hex( hex: &str, expected: &TypeSignature, + sanitize: bool, ) -> Result { let mut data = hex_bytes(hex).map_err(|_| "Bad hex string")?; - Value::try_deserialize_bytes(&mut data, expected) + Value::try_deserialize_bytes(&mut data, expected, sanitize) } /// This function attempts to deserialize a byte buffer into a @@ -817,10 +1115,11 @@ impl Value { pub fn try_deserialize_bytes_exact( bytes: &Vec, expected: &TypeSignature, + sanitize: bool, ) -> Result { let input_length = bytes.len(); let (value, read_count) = - Value::deserialize_read_count(&mut bytes.as_slice(), Some(expected))?; + Value::deserialize_read_count(&mut bytes.as_slice(), Some(expected), sanitize)?; if read_count != (input_length as u64) { Err(SerializationError::LeftoverBytesInDeserialization) } else { @@ -828,10 +1127,14 @@ impl Value { } } - pub fn try_deserialize_bytes_untyped(bytes: &Vec) -> Result { - Value::deserialize_read(&mut bytes.as_slice(), None) + /// Try to deserialize a value without type information. This *does not* perform sanitization + /// so it should not be used when decoding clarity database values. + fn try_deserialize_bytes_untyped(bytes: &Vec) -> Result { + Value::deserialize_read(&mut bytes.as_slice(), None, false) } + /// Try to deserialize a value from a hex string without type information. This *does not* + /// perform sanitization. pub fn try_deserialize_hex_untyped(hex: &str) -> Result { let hex = if hex.starts_with("0x") { &hex[2..] 
@@ -842,11 +1145,6 @@ impl Value { Value::try_deserialize_bytes_untyped(&mut data) } - pub fn deserialize(hex: &str, expected: &TypeSignature) -> Self { - Value::try_deserialize_hex(hex, expected) - .expect("ERROR: Failed to parse Clarity hex string") - } - pub fn serialized_size(&self) -> u32 { let mut counter = WriteCounter { count: 0 }; self.serialize_write(&mut counter) @@ -882,18 +1180,129 @@ impl Write for WriteCounter { } } -impl ClaritySerializable for Value { - fn serialize(&self) -> String { +impl Value { + pub fn serialize_to_vec(&self) -> Vec { let mut byte_serialization = Vec::new(); self.serialize_write(&mut byte_serialization) .expect("IOError filling byte buffer."); + byte_serialization + } + + /// This does *not* perform any data sanitization + pub fn serialize_to_hex(&self) -> String { + let byte_serialization = self.serialize_to_vec(); to_hex(byte_serialization.as_slice()) } -} -impl ClarityDeserializable for Value { - fn deserialize(hex: &str) -> Self { - Value::try_deserialize_hex_untyped(hex).expect("ERROR: Failed to parse Clarity hex string") + /// Sanitize `value` against pre-2.4 serialization + /// + /// Returns Some if the sanitization is successful, or was not necessary. + /// Returns None if the sanitization failed. + /// + /// Returns the sanitized value _and_ whether or not sanitization was required. 
+ pub fn sanitize_value( + epoch: &StacksEpochId, + expected: &TypeSignature, + value: Value, + ) -> Option<(Value, bool)> { + // in epochs before 2.4, perform no sanitization + if !epoch.value_sanitizing() { + return Some((value, false)); + } + let (output, did_sanitize) = match value { + Value::Sequence(SequenceData::List(l)) => { + let lt = match expected { + TypeSignature::SequenceType(SequenceSubtype::ListType(lt)) => lt, + _ => return None, + }; + if l.len() > lt.get_max_len() { + return None; + } + let mut sanitized_items = vec![]; + let mut did_sanitize_children = false; + for item in l.data.into_iter() { + let (sanitized_item, did_sanitize) = + Self::sanitize_value(epoch, lt.get_list_item_type(), item)?; + sanitized_items.push(sanitized_item); + did_sanitize_children = did_sanitize_children || did_sanitize; + } + // do not sanitize list before construction here, because we're already sanitizing + let output_list = Value::cons_list_unsanitized(sanitized_items).ok()?; + (output_list, did_sanitize_children) + } + Value::Tuple(tuple_data) => { + let tt = match expected { + TypeSignature::TupleType(tt) => tt, + _ => return None, + }; + let mut sanitized_tuple_entries = vec![]; + let original_tuple_len = tuple_data.len(); + let mut tuple_data_map = tuple_data.data_map; + let mut did_sanitize_children = false; + for (key, expect_key_type) in tt.get_type_map().iter() { + let field_data = tuple_data_map.remove(key)?; + let (sanitized_field, did_sanitize) = + Self::sanitize_value(epoch, expect_key_type, field_data)?; + sanitized_tuple_entries.push((key.clone(), sanitized_field)); + did_sanitize_children = did_sanitize_children || did_sanitize; + } + if sanitized_tuple_entries.len() as u64 != tt.len() { + // this code should be unreachable, because I think any case that + // could trigger this would have returned None earlier + warn!("Sanitizer handled path that should have errored earlier, skipping sanitization"); + return None; + } + let did_sanitize_tuple = 
did_sanitize_children || (tt.len() != original_tuple_len); + ( + Value::Tuple(TupleData::from_data(sanitized_tuple_entries).ok()?), + did_sanitize_tuple, + ) + } + Value::Optional(opt_data) => { + let inner_type = match expected { + TypeSignature::OptionalType(inner_type) => inner_type, + _ => return None, + }; + let some_data = match opt_data.data { + Some(data) => *data, + None => return Some((Value::none(), false)), + }; + let (sanitized_data, did_sanitize_child) = + Self::sanitize_value(epoch, &inner_type, some_data)?; + (Value::some(sanitized_data).ok()?, did_sanitize_child) + } + Value::Response(response) => { + let rt = match expected { + TypeSignature::ResponseType(rt) => rt, + _ => return None, + }; + + let response_ok = response.committed; + let response_data = *response.data; + let inner_type = if response_ok { &rt.0 } else { &rt.1 }; + let (sanitized_inner, did_sanitize_child) = + Self::sanitize_value(epoch, &inner_type, response_data)?; + let sanitized_resp = if response_ok { + Value::okay(sanitized_inner) + } else { + Value::error(sanitized_inner) + }; + (sanitized_resp.ok()?, did_sanitize_child) + } + value => { + if expected.admits(epoch, &value).ok()? { + return Some((value, false)); + } else { + return None; + } + } + }; + + if expected.admits(epoch, &output).ok()? { + Some((output, did_sanitize)) + } else { + None + } } } @@ -919,13 +1328,15 @@ impl ClarityDeserializable for u32 { } } +/// Note: the StacksMessageCodec implementation for Clarity values *does not* +/// sanitize its serialization or deserialization. 
impl StacksMessageCodec for Value { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { self.serialize_write(fd).map_err(codec_error::WriteError) } fn consensus_deserialize(fd: &mut R) -> Result { - Value::deserialize_read(fd, None).map_err(|e| match e { + Value::deserialize_read(fd, None, false).map_err(|e| match e { SerializationError::IOError(e) => codec_error::ReadError(e.err), _ => codec_error::DeserializeError(format!("Failed to decode clarity value: {:?}", &e)), }) @@ -935,14 +1346,14 @@ impl StacksMessageCodec for Value { impl std::hash::Hash for Value { fn hash(&self, state: &mut H) { let mut s = vec![]; - self.consensus_serialize(&mut s) + self.serialize_write(&mut s) .expect("FATAL: failed to serialize to vec"); s.hash(state); } } #[cfg(test)] -mod tests { +pub mod tests { use std::io::Write; use rstest::rstest; @@ -951,22 +1362,12 @@ mod tests { use super::super::*; use super::SerializationError; - use crate::vm::database::{ClarityDeserializable, ClaritySerializable}; + use crate::vm::database::{ClarityDeserializable, ClaritySerializable, RollbackWrapper}; use crate::vm::errors::Error; + use crate::vm::tests::test_clarity_versions; use crate::vm::types::TypeSignature::{BoolType, IntType}; use crate::vm::ClarityVersion; - #[template] - #[rstest] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] - fn test_clarity_versions_serialization( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, - ) { - } - fn buff_type(size: u32) -> TypeSignature { TypeSignature::SequenceType(SequenceSubtype::BufferType(size.try_into().unwrap())).into() } @@ -974,16 +1375,17 @@ mod tests { fn test_deser_ser(v: Value) { assert_eq!( &v, - &Value::deserialize(&v.serialize(), &TypeSignature::type_of(&v)) + &Value::try_deserialize_hex(&v.serialize_to_hex(), &TypeSignature::type_of(&v), false) + .unwrap() ); assert_eq!( 
&v, - &Value::try_deserialize_hex_untyped(&v.serialize()).unwrap() + &Value::try_deserialize_hex_untyped(&v.serialize_to_hex()).unwrap() ); // test the serialized_size implementation assert_eq!( v.serialized_size(), - v.serialize().len() as u32 / 2, + v.serialize_to_hex().len() as u32 / 2, "serialized_size() should return the byte length of the serialization (half the length of the hex encoding)", ); } @@ -994,7 +1396,7 @@ mod tests { fn test_bad_expectation(v: Value, e: TypeSignature) { assert!( - match Value::try_deserialize_hex(&v.serialize(), &e).unwrap_err() { + match Value::try_deserialize_hex(&v.serialize_to_hex(), &e, false).unwrap_err() { SerializationError::DeserializeExpected(_) => true, _ => false, } @@ -1012,7 +1414,7 @@ mod tests { test_deser_u32_helper(134217728); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_lists(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let list_list_int = Value::list_from(vec![Value::list_from(vec![ Value::Int(1), @@ -1024,18 +1426,21 @@ mod tests { // Should be legal! 
Value::try_deserialize_hex( - &Value::list_from(vec![]).unwrap().serialize(), + &Value::list_from(vec![]).unwrap().serialize_to_hex(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), + false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize(), + &list_list_int.serialize_to_hex(), &TypeSignature::from_string("(list 2 (list 3 int))", version, epoch), + false, ) .unwrap(); Value::try_deserialize_hex( - &list_list_int.serialize(), + &list_list_int.serialize_to_hex(), &TypeSignature::from_string("(list 1 (list 4 int))", version, epoch), + false, ) .unwrap(); @@ -1071,7 +1476,7 @@ mod tests { .unwrap(); assert_eq!( - Value::deserialize_read(&mut too_big.as_slice(), None).unwrap_err(), + Value::deserialize_read(&mut too_big.as_slice(), None, false).unwrap_err(), "Illegal list type".into() ); @@ -1094,7 +1499,7 @@ mod tests { "Unexpected end of byte stream".into()); */ - match Value::deserialize_read(&mut eof.as_slice(), None) { + match Value::deserialize_read(&mut eof.as_slice(), None, false) { Ok(_) => assert!(false, "Accidentally parsed truncated slice"), Err(eres) => match eres { SerializationError::IOError(ioe) => match ioe.err.kind() { @@ -1136,7 +1541,7 @@ mod tests { test_bad_expectation(Value::UInt(1), TypeSignature::IntType); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_opts(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::none()); test_deser_ser(Value::some(Value::Int(15)).unwrap()); @@ -1150,7 +1555,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_resp(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::okay(Value::Int(15)).unwrap()); test_deser_ser(Value::error(Value::Int(15)).unwrap()); @@ -1167,7 +1572,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_buffs(#[case] version: 
ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::buff_from(vec![0, 0, 0, 0]).unwrap()); test_deser_ser(Value::buff_from(vec![0xde, 0xad, 0xbe, 0xef]).unwrap()); @@ -1185,7 +1590,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_string_ascii(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::string_ascii_from_bytes(vec![61, 62, 63, 64]).unwrap()); @@ -1196,7 +1601,7 @@ mod tests { ); } - #[apply(test_clarity_versions_serialization)] + #[apply(test_clarity_versions)] fn test_string_utf8(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { test_deser_ser(Value::string_utf8_from_bytes(vec![61, 62, 63, 64]).unwrap()); test_deser_ser( @@ -1256,39 +1661,437 @@ mod tests { // t_0 and t_1 are actually the same assert_eq!( - Value::try_deserialize_hex(&t_1.serialize(), &TypeSignature::type_of(&t_0)).unwrap(), - Value::try_deserialize_hex(&t_0.serialize(), &TypeSignature::type_of(&t_0)).unwrap() + Value::try_deserialize_hex( + &t_1.serialize_to_hex(), + &TypeSignature::type_of(&t_0), + false + ) + .unwrap(), + Value::try_deserialize_hex( + &t_0.serialize_to_hex(), + &TypeSignature::type_of(&t_0), + false + ) + .unwrap() ); // field number not equal to expectations - assert!( - match Value::try_deserialize_hex(&t_3.serialize(), &TypeSignature::type_of(&t_1)) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ); + assert!(match Value::try_deserialize_hex( + &t_3.serialize_to_hex(), + &TypeSignature::type_of(&t_1), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field type mismatch - assert!( - match Value::try_deserialize_hex(&t_2.serialize(), &TypeSignature::type_of(&t_1)) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } - ); + assert!(match Value::try_deserialize_hex( + &t_2.serialize_to_hex(), + 
&TypeSignature::type_of(&t_1), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); // field not-present in expected - assert!( - match Value::try_deserialize_hex(&t_1.serialize(), &TypeSignature::type_of(&t_4)) - .unwrap_err() - { - SerializationError::DeserializeExpected(_) => true, - _ => false, - } + assert!(match Value::try_deserialize_hex( + &t_1.serialize_to_hex(), + &TypeSignature::type_of(&t_4), + false + ) + .unwrap_err() + { + SerializationError::DeserializeExpected(_) => true, + _ => false, + }); + } + + #[apply(test_clarity_versions)] + fn test_sanitization(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { + let v_1 = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(); + let v_1_good = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::Int(4))]) + .unwrap() + .into(), + ]) + .unwrap(); + + let t_1_good = TypeSignature::from_string("(list 5 (tuple (b int)))", version, epoch); + let t_1_bad_0 = + TypeSignature::from_string("(list 5 (tuple (b int) (a int)))", version, epoch); + let t_1_bad_1 = TypeSignature::from_string("(list 5 (tuple (b uint)))", version, epoch); + + let v_2 = TupleData::from_data(vec![ + ( + "list-1".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ( + "list-2".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("c".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), 
Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ]) + .unwrap() + .into(); + + let v_2_good = TupleData::from_data(vec![ + ( + "list-1".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::Int(4))]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ( + "list-2".into(), + Value::list_from(vec![ + TupleData::from_data(vec![("c".into(), Value::Int(2))]) + .unwrap() + .into(), + TupleData::from_data(vec![("c".into(), Value::Int(3))]) + .unwrap() + .into(), + ]) + .unwrap(), + ), + ]) + .unwrap() + .into(); + + let t_2_good = TypeSignature::from_string( + "(tuple (list-2 (list 2 (tuple (c int)))) (list-1 (list 5 (tuple (b int)))))", + version, + epoch, + ); + let t_2_bad_0 = TypeSignature::from_string( + "(tuple (list-2 (list 2 (tuple (c int)))) (list-1 (list 5 (tuple (a int)))))", + version, + epoch, ); + let t_2_bad_1 = TypeSignature::from_string( + "(tuple (list-2 (list 1 (tuple (c int)))) (list-1 (list 5 (tuple (b int)))))", + version, + epoch, + ); + + let v_3 = Value::some( + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ("c".into(), Value::Int(3)), + ]) + .unwrap() + .into(), + ) + .unwrap(); + + let v_3_good = Value::some( + TupleData::from_data(vec![ + ("a".into(), Value::Int(1)), + ("b".into(), Value::Int(4)), + ]) + .unwrap() + .into(), + ) + .unwrap(); + + let t_3_good = + TypeSignature::from_string("(optional (tuple (a int) (b int)))", version, epoch); + let t_3_bad_0 = + TypeSignature::from_string("(optional (tuple (a uint) (b int)))", version, epoch); + let t_3_bad_1 = + TypeSignature::from_string("(optional (tuple (d int) (b int)))", version, epoch); + + let v_4 = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + 
("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(); + let v_4_good = Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(); + + let t_4_good = + TypeSignature::from_string("(list 5 (tuple (b (optional int))))", version, epoch); + let t_4_bad_0 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + let t_4_bad_1 = + TypeSignature::from_string("(list 5 (tuple (b (optional uint))))", version, epoch); + + let v_5 = Value::okay( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + let v_5_good = Value::okay( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + + let t_5_good_0 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_5_good_1 = TypeSignature::from_string( + "(response (list 2 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_5_good_2 = TypeSignature::from_string( + "(response (list 2 (tuple (b (optional int)))) bool)", + version, + epoch, + ); + let t_5_bad_0 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)) (a (optional int)))) uint)", + version, + epoch, + ); + let t_5_bad_1 = TypeSignature::from_string( 
+ "(response (list 5 (tuple (b (optional uint)))) int)", + version, + epoch, + ); + let t_5_bad_2 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_5_bad_3 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + + let v_6 = Value::error( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![ + ("a".into(), Value::some(Value::Int(1)).unwrap()), + ("b".into(), Value::none()), + ("c".into(), Value::some(Value::Int(3)).unwrap()), + ]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + let v_6_good = Value::error( + Value::list_from(vec![ + TupleData::from_data(vec![("b".into(), Value::some(Value::Int(2)).unwrap())]) + .unwrap() + .into(), + TupleData::from_data(vec![("b".into(), Value::none())]) + .unwrap() + .into(), + ]) + .unwrap(), + ) + .unwrap(); + + let t_6_good_0 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_good_1 = TypeSignature::from_string( + "(response int (list 2 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_good_2 = TypeSignature::from_string( + "(response bool (list 2 (tuple (b (optional int)))))", + version, + epoch, + ); + let t_6_bad_0 = TypeSignature::from_string( + "(response uint (list 5 (tuple (b (optional int)) (a (optional int)))))", + version, + epoch, + ); + let t_6_bad_1 = TypeSignature::from_string( + "(response int (list 5 (tuple (b (optional uint)))))", + version, + epoch, + ); + let t_6_bad_2 = TypeSignature::from_string( + "(response (list 5 (tuple (b (optional int)))) int)", + version, + epoch, + ); + let t_6_bad_3 = TypeSignature::from_string( + "(list 5 (tuple (b (optional int)) (a (optional int))))", + version, + epoch, + ); + + let test_cases = [ + (v_1, v_1_good, t_1_good, vec![t_1_bad_0, t_1_bad_1]), 
+ (v_2, v_2_good, t_2_good, vec![t_2_bad_0, t_2_bad_1]), + (v_3, v_3_good, t_3_good, vec![t_3_bad_0, t_3_bad_1]), + (v_4, v_4_good, t_4_good, vec![t_4_bad_0, t_4_bad_1]), + ( + v_5.clone(), + v_5_good.clone(), + t_5_good_0, + vec![t_5_bad_0, t_5_bad_1, t_5_bad_2, t_5_bad_3], + ), + (v_5.clone(), v_5_good.clone(), t_5_good_1, vec![]), + (v_5, v_5_good, t_5_good_2, vec![]), + ( + v_6.clone(), + v_6_good.clone(), + t_6_good_0, + vec![t_6_bad_0, t_6_bad_1, t_6_bad_2, t_6_bad_3], + ), + (v_6.clone(), v_6_good.clone(), t_6_good_1, vec![]), + (v_6, v_6_good, t_6_good_2, vec![]), + ]; + + for (input_val, expected_out, good_type, bad_types) in test_cases.iter() { + eprintln!( + "Testing {}. Expected sanitization = {}", + input_val, expected_out + ); + let serialized = input_val.serialize_to_hex(); + + let result = + RollbackWrapper::deserialize_value(&serialized, good_type, &epoch).map(|x| x.value); + if epoch < StacksEpochId::Epoch24 { + let error = result.unwrap_err(); + match error { + SerializationError::DeserializeExpected(_) => {} + _ => panic!("Expected a DeserializeExpected error"), + } + } else { + let value = result.unwrap(); + assert_eq!(&value, expected_out); + } + + for bad_type in bad_types.iter() { + eprintln!("Testing bad type: {}", bad_type); + let result = RollbackWrapper::deserialize_value(&serialized, bad_type, &epoch); + let error = result.unwrap_err(); + match error { + SerializationError::DeserializeExpected(_) => {} + e => panic!("Expected a DeserializeExpected error, got = {}", e), + } + } + + // now test the value::sanitize routine + let result = Value::sanitize_value(&epoch, good_type, input_val.clone()); + if epoch < StacksEpochId::Epoch24 { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, input_val); + assert!(!did_sanitize, "Should not sanitize before epoch-2.4"); + } else { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, expected_out); + assert!(did_sanitize, "Should have sanitized"); + } + + for bad_type 
in bad_types.iter() { + eprintln!("Testing bad type: {}", bad_type); + let result = Value::sanitize_value(&epoch, bad_type, input_val.clone()); + if epoch < StacksEpochId::Epoch24 { + let (value, did_sanitize) = result.unwrap(); + assert_eq!(&value, input_val); + assert!(!did_sanitize, "Should not sanitize before epoch-2.4"); + } else { + assert!(result.is_none()); + } + } + } } #[test] @@ -1328,7 +2131,7 @@ mod tests { for (test, expected) in tests.iter() { if let Ok(x) = expected { - assert_eq!(test, &x.serialize()); + assert_eq!(test, &x.serialize_to_hex()); } assert_eq!(expected, &Value::try_deserialize_hex_untyped(test)); assert_eq!( diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 454d3ae5e6..42d6aad481 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -531,7 +531,10 @@ impl TypeSignature { pub fn admits_type(&self, epoch: &StacksEpochId, other: &TypeSignature) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => self.admits_type_v2_0(&other), - StacksEpochId::Epoch21 => self.admits_type_v2_1(other), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } } @@ -724,8 +727,13 @@ impl TypeSignature { /// types for the specified epoch. pub fn canonicalize(&self, epoch: &StacksEpochId) -> TypeSignature { match epoch { - StacksEpochId::Epoch21 => self.canonicalize_v2_1(), - _ => self.clone(), + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + // Epoch-2.2 had a regression in canonicalization, so it must be preserved here. 
+ | StacksEpochId::Epoch22 => self.clone(), + // Note for future epochs: Epochs >= 2.3 should use the canonicalize_v2_1() routine + StacksEpochId::Epoch21 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 => self.canonicalize_v2_1(), } } @@ -843,6 +851,7 @@ impl TryFrom> for TupleTypeSignature { } impl TupleTypeSignature { + /// Return the number of fields in this tuple type pub fn len(&self) -> u64 { self.type_map.len() as u64 } @@ -1047,7 +1056,10 @@ impl TypeSignature { ) -> Result { match epoch { StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => Self::least_supertype_v2_0(a, b), - StacksEpochId::Epoch21 => Self::least_supertype_v2_1(a, b), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } } @@ -1066,8 +1078,9 @@ impl TypeSignature { let entry_out = Self::least_supertype_v2_0(entry_a, entry_b)?; type_map_out.insert(name.clone(), entry_out); } - Ok(TupleTypeSignature::try_from(type_map_out).map(|x| x.into()) - .expect("ERR: least_supertype_v2_0 attempted to construct a too-large supertype of two types")) + Ok(TupleTypeSignature::try_from(type_map_out) + .map(|x| x.into()) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } ( SequenceType(SequenceSubtype::ListType(ListTypeData { @@ -1088,7 +1101,7 @@ impl TypeSignature { }; let max_len = cmp::max(len_a, len_b); Ok(Self::list_of(entry_type, *max_len) - .expect("ERR: least_supertype_v2_0 attempted to construct a too-large supertype of two types")) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) 
} (ResponseType(resp_a), ResponseType(resp_b)) => { let ok_type = @@ -1167,8 +1180,9 @@ impl TypeSignature { let entry_out = Self::least_supertype_v2_1(entry_a, entry_b)?; type_map_out.insert(name.clone(), entry_out); } - Ok(TupleTypeSignature::try_from(type_map_out).map(|x| x.into()) - .expect("ERR: least_supertype_v2_1 attempted to construct a too-large supertype of two types")) + Ok(TupleTypeSignature::try_from(type_map_out) + .map(|x| x.into()) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } ( SequenceType(SequenceSubtype::ListType(ListTypeData { @@ -1189,7 +1203,7 @@ impl TypeSignature { }; let max_len = cmp::max(len_a, len_b); Ok(Self::list_of(entry_type, *max_len) - .expect("ERR: least_supertype_v2_1 attempted to construct a too-large supertype of two types")) + .map_err(|_| CheckErrors::SupertypeTooLarge)?) } (ResponseType(resp_a), ResponseType(resp_b)) => { let ok_type = @@ -1930,16 +1944,7 @@ mod test { use super::*; use crate::vm::{execute, ClarityVersion}; - #[template] - #[rstest] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] - #[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] - #[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] - fn test_clarity_versions_signatures( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, - ) { - } + use crate::vm::tests::test_clarity_versions; fn fail_parse(val: &str, version: ClarityVersion, epoch: StacksEpochId) -> CheckErrors { use crate::vm::ast::parse; @@ -1953,14 +1958,14 @@ mod test { TypeSignature::parse_type_repr(epoch, expr, &mut ()).unwrap_err() } - #[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn type_of_list_of_buffs(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let value = execute("(list \"abc\" \"abcde\")").unwrap().unwrap(); let type_descr = TypeSignature::from_string("(list 2 (string-ascii 5))", version, epoch); assert_eq!(TypeSignature::type_of(&value), type_descr); } - 
#[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn type_signature_way_too_big(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // first_tuple.type_size ~= 131 // second_tuple.type_size = k * (130+130) @@ -1981,7 +1986,7 @@ mod test { ); } - #[apply(test_clarity_versions_signatures)] + #[apply(test_clarity_versions)] fn test_construction(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let bad_type_descriptions = [ ("(tuple)", EmptyTuplesNotAllowed), diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 61cdaa0447..7da1f744f6 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -33,6 +33,9 @@ impl ClarityVersion { StacksEpochId::Epoch20 => ClarityVersion::Clarity1, StacksEpochId::Epoch2_05 => ClarityVersion::Clarity1, StacksEpochId::Epoch21 => ClarityVersion::Clarity2, + StacksEpochId::Epoch22 => ClarityVersion::Clarity2, + StacksEpochId::Epoch23 => ClarityVersion::Clarity2, + StacksEpochId::Epoch24 => ClarityVersion::Clarity2, } } } diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md deleted file mode 100644 index fe880fd03f..0000000000 --- a/docs/CONTRIBUTING.md +++ /dev/null @@ -1,402 +0,0 @@ -# Contributing to Blockstack Core - -Blockstack Core is open-source software written in Rust. Contributions -should adhere to the following best practices. - -You can find information on joining online community forums (Discord, mailing list etc.) in the [README](README.md). 
- -#### Table Of Contents - -[Code of Conduct](#code-of-conduct) - -[How Can I Contribute?](#how-can-i-contribute) -* [Development Workflow](#development-workflow) -* [Contributing Conventions](#contributing-conventions) - -[Style](#style) -* [Git Commit Messages](#git-commit-messages) -* [Rust Styleguide](#rust-styleguide) -* [Comments](#comments) - -[License Agreement](#licensing-and-contributor-license-agreement) - -# Code of Conduct - -This project and everyone participating in it is governed by this [Code of Conduct](CODE_OF_CONDUCT.md). - -# How Can I Contribute? -## Development Workflow - -- For typical development, branch off of the `develop` branch. -- For consensus breaking changes, branch off of the `next` branch. -- For hotfixes, branch off of `master`. - -For up-to-date information on development norms and best practices, refer to [this document](https://github.com/stacks-network/stacks-blockchain/wiki/Development-Process:-Norms-and-Best-Practices). - -### Documentation Updates - -- Any major changes should be added to the [CHANGELOG](CHANGELOG.md). -- Mention any required documentation changes in the description of your pull request. -- If adding an RPC endpoint, add an entry for the new endpoint to the OpenAPI spec `./docs/rpc/openapi.yaml`. -- If your code adds or modifies any major features (struct, trait, test, module, function, etc.), each should be documented according to our [style rules](#comments). -- To generate HTML documentation for the library, run `cargo doc --no-deps --open`. -- It's possible to check the percentage of code coverage by (a) switching to the nightly version of rust (can run `rustup default nightly`, and also might need to edit `rust-toolchain` file to say "nightly" instead of "stable"), and (b) running `RUSTDOCFLAGS='-Z unstable-options --show-coverage' cargo doc`. 
- -### Each file should include relevant unit tests - -Each Rust file should contain a `mod test {}` definition, in which unit tests -should be supplied for the file's methods. Unit tests should cover a maximal -amount of code paths. - -### GitHub Workflows and Actions -We run our CI pipeline using GitHub workflows. The main workflows are CI (at `.github/workflows/ci.yml`), -and stacks-bitcoin-integration-tests (at `.github/workflows/bitcoin-tests.yml`). These -workflows can be manually triggered on the Actions tab in the GitHub UI on any branch. - -### Guidance for Slow/Non-Parallelizable Tests -PRs must include test coverage. However, if your PR includes large tests or tests which cannot run in parallel -(which is the default operation of the `cargo test` command), these tests should be decorated with `#[ignore]`. -If you add `#[ignore]` tests, you should add your branch to the filters for the `all_tests` job in our circle.yml -(or if you are working on net code or marf code, your branch should be named such that it matches the existing -filters there). - -A test should be marked `#[ignore]` if: - -1. It does not _always_ pass `cargo test` in a vanilla environment (i.e., it does not need to run with `--test-threads 1`). -2. Or, it runs for over a minute via a normal `cargo test` execution (the `cargo test` command will warn if this is not the case). - - -## Contributing Conventions - -### Simplicity of implementation - -The most important consideration when accepting or rejecting a contribution is -the simplicity (i.e. ease of understanding) of its implementation. -Contributions that are "clever" or introduce functionality beyond the scope of -the immediate problem they are meant to solve will be rejected. - -#### Type simplicity - -Simplicity of implementation includes simplicity of types. Type parameters -and associated types should only be used if there are at -least two possible implementations of those types. 
- -Lifetime parameters should only be introduced if the compiler cannot deduce them -on its own. - -### Builds with a stable Rust compiler -We use a recent, stable Rust compiler. Contributions should _not_ -require nightly Rust features to build and run. - -### Use built-in logging facilities - -Blockstack Core implements logging macros in `util::log`. If your code needs to -output data, it should use these macros _exclusively_ for doing so. The only -exception is code that is explicitly user-facing, such as help documentation. - -### Minimal dependencies - -Adding new package dependencies is very much discouraged. Exceptions will be -granted on a case-by-case basis, and only if deemed absolutely necessary. - -### Minimal global macros - -Adding new global macros is discouraged. Exceptions will only be given if -absolutely necessary. - -### Minimal compiler warnings - -Contributions should not trigger compiler warnings if possible, and should not -mask compiler warnings with macros. Common sources of compiler warnings that -will not be accepted include, but are not limited to: - -* unnecessary imports -* unused code -* variable naming conventions -* unhandled return types - -### Minimal `unsafe` code - -Contributions should not contain `unsafe` blocks if at all possible. - -### Error definitions - -Each module should include an `Error` enumeration in its `mod.rs` that encodes -errors specific to the module. All error code paths in the module should return -an `Err` type with one of the module's errors. - -# Style -## Git Commit Messages -Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). -The general format is as follows: -``` -[optional scope]: - -[optional body] -[optional footer(s)] -``` -Common types include build, ci, docs, fix, feat, test, refactor, etc. - -## Rust styleguide - -### Rust formatting tools -This repository uses the default rustfmt formatting style. 
PRs will be checked against `rustfmt` and will _fail_ if not -properly formatted. - -You can check the formatting locally via: - -```bash -cargo fmt --all -- --check --config group_imports=StdExternalCrate -``` - -You can automatically reformat your commit via: - -```bash -cargo fmt --all -- --config group_imports=StdExternalCrate -``` - -### Import order -Code files should have a maximum of three import groups: -1. A group with std/alloc/core imports -2. A group with external crate imports -3. A group for internal imports: self, super, and crate. - -This grouping and ordering is enforced by the rustfmt flag, `group_imports`. - -### Code block consistency - -Surrounding code blocks with `{` and `}` is encouraged, even when the enclosed -block is a single statement. Blocks in the same lexical scope must use -consistent conventions. For example, consider the following: - -``` -match foo { -1..2 => { -// this is a single statement, but it is surrounded -// with { and } because the other blocks in the match -// statement need them. -Ok(true) -}, -3..4 => { -error!("Bad value for foo"); -Err(Error::BadFoo) -}, -_ => { -// similarly, this block uses { } -Ok(true) -} -} - -// conversely, all of the arms of this match statement -// have one-statement blocks, so { and } can be elided. -match bar { -1..2 => Some("abc"), -3..4 => Some("def"), -_ => None -} -``` - -### Whitespace - -All contributions should use the same whitespacing as the rest of the project. -Moreover, Pull requests where a large number of changes only deal with whitespace will be -rejected. - -## Comments -Comments are very important for the readability and correctness of the codebase. The purpose of comments is: - -* Allow readers to understand the roles of components and functions without having to check how they are used. -* Allow readers to check the correctness of the code against the comments. -* Allow readers to follow tests. 
- -In the limit, if there are no comments, the problems that arise are: - -* Understanding one part of the code requires understanding *many* parts of the code. This is because the reader is forced to learn the meanings of constructs inductively through their use. Learning how one construct is used requires understanding its neighbors, and then their neighbors, and so on, recursively. Instead, with a good comment, the reader can understand the role of a construct with `O(1)` work by reading the comment. -* The user cannot be certain if there is a bug in the code, because there is no distinction between the contract of a function, and its definition. -* The user cannot be sure if a test is correct, because the logic of the test is not specified, and the functions do not have contracts. - -### Comment Formatting - -Comments are to be formatted in typical `rust` style, specifically: - -- Use markdown to format comments. - -- Use the triple forward slash "///" for modules, structs, enums, traits and functions. Use double forward slash "//" for comments on individual lines of code. - -- Start with a high-level description of the function, adding more sentences with details if necessary. - -- When documenting panics, errors, or other conceptual sections, introduce a Markdown section with a single `#`, e.g.: - - - ``` - # Errors - * ContractTooLargeError: Thrown when `contract` is larger than `MAX_CONTRACT_SIZE`. - ``` - -### Content of Comments -The following kinds of things should have comments. - -#### Components -Comments for a component (`struct`, `trait`, or `enum`) should explain what the overall -purpose of that component is. This is usually a concept, and not a formal contract. Include anything that is not obvious about this component. - -**Example:** -```rust - /// The `ReadOnlyChecker` analyzes a contract to determine whether - /// there are any violations of read-only declarations. 
By a "violation" - /// we mean a function that is marked as "read only" but which tries - /// to modify chainstate. - pub struct ReadOnlyChecker<'a, 'b> { -``` - -This comment is considered positive because it explains the concept behind the class at a glance, so that the reader has some idea about what the methods will achieve, without reading each method declaration and comment. It also defines some terms that can be used in the comments on the method names. - -#### Functions - -The comments on a function should explain what the function does, without having to read it. Wherever practical, it should specify the contract of a function, such that a bug in the logic could be discovered by a discrepancy between contract and implementation, or such that a test could be written with only access to the function comment. - -Without being unnecessarily verbose, explain how the output is calculated -from the inputs. Explain the side effects. Explain any restrictions on the inputs. Explain failure -conditions, including when the function will panic, return an error -or return an empty value. - -**Example:** - -```rust -/// A contract that does not violate its read-only declarations is called -/// *read-only correct*. -impl<'a, 'b> ReadOnlyChecker<'a, 'b> { -/// Checks each top-level expression in `contract_analysis.expressions` -/// for read-only correctness. -/// -/// Returns successfully iff this function is read-only correct. -/// -/// # Errors -/// -/// - Returns CheckErrors::WriteAttemptedInReadOnly if there is a read-only -/// violation, i.e. if some function marked read-only attempts to modify -/// the chainstate. -pub fn run(&mut self, contract_analysis: &ContractAnalysis) -> CheckResult<()> -``` - -This comment is considered positive because it explains the contract of the function in pseudo-code. Someone who understands the constructs mentioned could, e.g., write a test for this method from this description. 
- -#### Comments on Implementations of Virtual Methods - -Note that, if a function implements a virtual function on an interface, the comments should not -repeat what was specified on the interface declaration. The comment should only add information specific to that implementation. - -### Data Members -Each data member in a struct should have a comment describing what that member -is, and what it is used for. Such comments are usually brief but should -clear up any ambiguity that might result from having only the variable -name and type. - -**Example:** - -```rust -pub struct ReadOnlyChecker<'a, 'b> { -/// Mapping from function name to a boolean indicating whether -/// the function with that name is read-only. -/// This map contains all functions in the contract analyzed. -defined_functions: HashMap, -``` - -This comment is considered positive because it clarifies users might have about the content and role of this member. E.g., it explains that the `bool` indicates whether the function is *read-only*, whereas this cannot be gotten from the signature alone. - -#### Tests - -Each test should have enough comments to help an unfamiliar reader understand: - -1. what is conceptually being tested -1. why a given answer is expected - -Sometimes this can be obvious without much comments, perhaps from the context, -or because the test is very simple. Often though, comments are necessary. - -**Example:** - -```rust -#[test] -#[ignore] -fn transaction_validation_integration_test() { -/// The purpose of this test is to check if the mempool admission checks -/// for the post tx endpoint are working as expected wrt the optional -/// `mempool_admission_check` query parameter. -/// -/// In this test, we are manually creating a microblock as well as -/// reloading the unconfirmed state of the chainstate, instead of relying -/// on `next_block_and_wait` to generate microblocks. 
We do this because -/// the unconfirmed state is not automatically being initialized -/// on the node, so attempting to validate any transactions against the -/// expected unconfirmed state fails. -``` - -This comment is considered positive because it explains the purpose of the test (checking the case of an optional parameter), it also guides the reader to understand the low-level details about why a microblock is created manually. - -### How Much to Comment - -Contributors should strike a balance between commenting "too much" and commenting "too little". Commenting "too much" primarily includes commenting things that are clear from the context. Commenting "too little" primarily includes writing no comments at all, or writing comments that leave important questions unresolved. - -Human judgment and creativity must be used to create good comments, which convey important information with small amounts of text. There is no single rule which can determine what a good comment is. Longer comments are *not* always better, since needlessly long comments have a cost: they require the reader to read more, take up whitespace, and take longer to write and review. - -#### Don't Restate the Function Names - -The contracts of functions should be implemented precisely enough that tests could be written looking only at the declaration and the comments (and without looking at the definition!). However: - -* **the author should assume that the reader has already read and understood the function name, variable names, type names, etc.** -* **the author should only state information that is new** - -So, if a function and its variables have very descriptive names, then there may be nothing to add in the comments at all! - -**Bad Example** - -``` -/// Appends a transaction to a block. 
-fn append_transaction_to_block(transaction:Transaction, &mut Block) -> Result<()> -``` - -This is considered bad because the function name already says "append transaction to block", so it doesn't add anything to restate it in the comments. However, *do* add anything that is not redundant, such as elaborating what it means to "append" (if there is more to say), or what conditions will lead to an error. - -**Good Example** - -``` -/// # Errors -/// -/// - BlockTooBigError: Is returned if adding `transaction` to `block` results -/// in a block size bigger than MAX_BLOCK_SIZE. -fn append_transaction_to_block(transaction:Transaction, block:&mut Block) -> Result<()> -``` - -This is considered good because the reader builds on the context created by the function and variable names. Rather than restating them, the function just adds elements of the contract that are not implicit in the declaration. - -#### Do's and Dont's - -*Don't* over-comment by documenting things that are clear from the context. E.g.: - -- Don't document the types of inputs or outputs, since these are parts of the type signature in `rust`. -- Don't necessarily document standard "getters" and "setters", like `get_clarity_version()`, unless there is unexpected information to add with the comment. -- Don't explain that a specific test does type-checking, if it is in a file that is dedicated to type-checking. - -*Do* document things that are not clear, e.g.: - -- For a function called `process_block`, explain what it means to "process" a block. -- For a function called `process_block`, make clear whether we mean anchored blocks, microblocks, or both. -- For a function called `run`, explain the steps involved in "running". -- For a function that takes arguments `peer1` and `peer2`, explain the difference between the two. -- For a function that takes an argument `height`, either explain in the comment what this is the *height of*. Alternatively, expand the variable name to remove the ambiguity. 
-- For a test, document what it is meant to test, and why the expected answers are, in fact, expected. - -### Changing Code Instead of Comments - -Keep in mind that better variable names can reduce the need for comments, e.g.: - -* `burnblock_height` instead of `height` may eliminate the need to comment that `height` refers to a burnblock height -* `process_microblocks` instead of `process_blocks` is more correct, and may eliminate the need to to explain that the inputs are microblocks -* `add_transaction_to_microblock` explains more than `handle_transaction`, and reduces the need to even read the comment - -# Licensing and contributor license agreement - -Blockstack Core is released under the terms of the GPL version 3. Contributions -that are not licensed under compatible terms will be rejected. Moreover, -contributions will not be accepted unless _all_ authors accept the project's -contributor license agreement. \ No newline at end of file diff --git a/docs/ci-release.md b/docs/ci-release.md new file mode 100644 index 0000000000..7025226d1e --- /dev/null +++ b/docs/ci-release.md @@ -0,0 +1,150 @@ +# Releases + +All releases are built via a Github Actions workflow named `CI`, and is responsible for building binary archives, checksums, and resulting docker images. +This workflow will also trigger any tests that need to be run, like integration tests. + +1. Releases are only created if a tag is manually provided when the ci workflow is triggered. +2. Pushing a new feature branch: Nothing is triggered automatically. PR's are required, or the ci workflow can be triggered manually on a specific branch to build a docker image for the specified branch. + +The following workflow steps are currently disabled: + +- Clippy +- Net-test +- Crate audit + +## TL;DR + +1. A PR will produce a single image built from source on Debian with glibc with 2 tags: + - `stacks-blockchain:` + - `stacks-blockchain:` +2. 
A merged PR from `develop` to the default branch will produce a single image built from source on Debian with glibc: + - `stacks-blockchain:` +3. An untagged build of any branch will produce a single image built from source on Debian with glibc: + - `stacks-blockchain:` +4. A tagged release on a non-default branch will produce 2 versions of the docker image (along with all binary archives): + - An Alpine image for several architectures tagged with: + - `stacks-blockchain:` + - An Debian image for several architectures tagged with: + - `stacks-blockchain:` +5. A tagged release on the default branch will produce 2 versions of the docker image (along with all binary archives): + - An Alpine image for several architectures tagged with: + - `stacks-blockchain:` + - `stacks-blockchain:` + - An Debian image for several architectures tagged with: + - `stacks-blockchain:` + - `stacks-blockchain:` + +## Release workflow: + +1. Create a feature branch: `feat/112-fix-something` +2. PR `feat/112-fix-something` to the `develop` branch + 1. CI Workflow is automatically triggered, resulting in a pushed docker image tagged with the **branch name** and **PR number** +3. PR `develop` to the default branch + 1. CI Workflow is automatically triggered, resulting in a pushed docker image tagged with the **branch name** and **PR number** +4. Merge `develop` branch to the default branch + 1. CI Workflow is triggered, resulting in a pushed docker image tagged with the **default branch name** +5. CI workflow is manually triggered on **non-default branch** with a version, i.e. `2.1.0.0.0-rc0` + 1. Github release for the manually input version is created with binaries + 2. Docker image pushed with tags of the **input version** and **branch** +6. CI workflow is manually triggered on **default branch** with a version, i.e. `2.1.0.0.0` + 1. Github release for the manually input version is created with binaries + 2. 
Docker image pushed with tags of the **input version** and **latest** + +## PR a branch to develop: + +ex: Branch is named `feat/112-fix-something` and the PR is numbered `112` + +- Steps executed: + - Rust Format + - Integration Tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name and PR number as tags + - ex: + - `stacks-blockchain:feat-112-fix-something` + - `stacks-blockchain:pr-112` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Merging a branch to develop: + +Nothing is triggered automatically + +## PR develop to master branches: + +ex: Branch is named `develop` and the PR is numbered `113` + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name and PR number as tags + - ex: + - `stacks-blockchain:develop` + - `stacks-blockchain:pr-113` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Merging a PR from develop to master: + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name as a tag + - ex: + - `stacks-blockchain:master` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Manually triggering workflow without tag (any branch): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Docker image is built from source on a debian distribution and pushed with the branch name as a tag + - ex: + - `stacks-blockchain:` +- Steps _not_ executed: + - No binaries are built + - No github release + - No docker images built from binary artifacts + +## Manually triggering workflow with tag 
on a non-default branch (i.e. tag of `2.1.0.0.0-rc0`): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Binaries built for specified architectures + - Archive and checksum files added to github release + - Github release (with artifacts/checksum) is created using the manually input tag + - Docker image built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` + - ex: + - `stacks-blockchain:2.1.0.0.0-rc0` +- Steps _not_ executed: + - No docker images built from source + +## Manually triggering workflow with tag on default branch (i.e. tag of `2.1.0.0.0`): + +- Steps executed: + - Rust format + - Integration tests + - Leaked credential test + - Binaries built for specified architectures + - Archive and checksum files added to github release + - Github release (with artifacts/checksum) is created using the manually input tag + - Docker image built from binaries on debian/alpine distributions and pushed with the provided input tag and `latest` + - ex: + - `stacks-blockchain:2.1.0.0.0-debian` + - `stacks-blockchain:latest-debian` + - `stacks-blockchain:2.1.0.0.0` + - `stacks-blockchain:latest` +- Steps _not_ executed: + - No docker images built from source diff --git a/docs/profiling.md b/docs/profiling.md index c35ca532af..35bbaf2f18 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -27,7 +27,7 @@ Enabling debug logging using environment variable `STACKS_LOG_DEBUG=1`: $ STACKS_LOG_DEBUG=1 cargo run -r -p stacks-node --bin stacks-node check-config --config=testnet/stacks-node/conf/mainnet-mockminer-conf.toml INFO [1661276562.220137] [testnet/stacks-node/src/main.rs:82] [main] stacks-node 0.1.0 (tip-mine:c90476aa8a+, release build, macos [aarch64]) INFO [1661276562.220363] [testnet/stacks-node/src/main.rs:115] [main] Loading config at path testnet/stacks-node/conf/mainnet-mockminer-conf.toml -DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config 
file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } +DEBG [1661276562.222450] [testnet/stacks-node/src/main.rs:118] [main] Loaded config file: ConfigFile { burnchain: Some(BurnchainConfigFile { chain: Some("bitcoin"), burn_fee_cap: Some(1), mode: Some("mainnet"), commit_anchor_block_within: None, peer_host: Some("bitcoind.stacks.co"), peer_port: Some(8333), rpc_port: Some(8332), rpc_ssl: None, 
username: Some("blockstack"), password: Some("blockstacksystem"), timeout: None, magic_bytes: None, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: None, satoshis_per_byte: None, leader_key_tx_estimated_size: None, block_commit_tx_estimated_size: None, rbf_fee_increment: None, max_rbf: None, epochs: None }), node: Some(NodeConfigFile { name: None, seed: None, deny_nodes: None, working_dir: Some("/Users/igor/w/stacks-work/working_dir"), rpc_bind: Some("0.0.0.0:20443"), p2p_bind: Some("0.0.0.0:20444"), p2p_address: None, data_url: None, bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444"), local_peer_seed: None, miner: Some(true), mock_mining: Some(true), mine_microblocks: None, microblock_frequency: None, max_microblocks: None, wait_time_for_microblocks: None, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: None, pox_sync_sample_secs: None, use_test_genesis_chainstate: None }), ustx_balance: None, events_observer: Some([EventObserverConfigFile { endpoint: "localhost:3700", events_keys: ["*"] }]), connection_options: None, fee_estimation: None, miner: None } INFO [1661276562.233071] [testnet/stacks-node/src/main.rs:128] [main] Valid config! ``` diff --git a/src/burnchains/affirmation.rs b/src/burnchains/affirmation.rs index a119a15586..163d65566a 100644 --- a/src/burnchains/affirmation.rs +++ b/src/burnchains/affirmation.rs @@ -571,11 +571,11 @@ pub fn read_prepare_phase_commits( test_debug!("Skip too-early block commit"); continue; } - if (opdata.parent_block_ptr as u64) < first_block_height { - if opdata.parent_block_ptr != 0 || opdata.parent_vtxindex != 0 { - test_debug!("Skip orphaned block-commit"); - continue; - } + // the block commit's parent must be a burnchain block that is evaluated by the node + // blocks that are <= first_block_height do not meet this requirement. 
+ if (opdata.parent_block_ptr as u64) <= first_block_height { + test_debug!("Skip orphaned block-commit"); + continue; } if opdata.block_height <= opdata.parent_block_ptr as u64 { test_debug!("Skip block-commit whose 'parent' comes at or after it"); diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 8fd41cea58..0fce861ff6 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -827,6 +827,11 @@ impl SpvClient { assert!(self.readwrite, "SPV header DB is open read-only"); let num_headers = block_headers.len(); + if num_headers == 0 { + // nothing to do + return Ok(()); + } + let first_header_hash = block_headers[0].header.bitcoin_hash(); let last_header_hash = block_headers[block_headers.len() - 1].header.bitcoin_hash(); let total_work_before = self.update_chain_work()?; @@ -1822,4 +1827,24 @@ mod test { let deserialized: Vec> = deserialize(&encoded_tx).unwrap(); } + + #[test] + fn test_handle_headers_empty() { + let headers_path = "/tmp/test-spv-handle_headers_empty.dat"; + if fs::metadata(headers_path).is_ok() { + fs::remove_file(headers_path).unwrap(); + } + + let mut spv_client = SpvClient::new( + headers_path, + 0, + None, + BitcoinNetworkType::Regtest, + true, + false, + ) + .unwrap(); + + spv_client.handle_headers(1, vec![]).unwrap(); + } } diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs index 36f2f9289d..fd7246a069 100644 --- a/src/burnchains/mod.rs +++ b/src/burnchains/mod.rs @@ -23,6 +23,18 @@ use std::io; use std::marker::PhantomData; use rusqlite::Error as sqlite_error; + +use crate::chainstate::burn::distribution::BurnSamplePoint; +use crate::chainstate::burn::operations::leader_block_commit::OUTPUTS_PER_COMMIT; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::burn::operations::Error as op_error; +use crate::chainstate::burn::operations::LeaderKeyRegisterOp; +use crate::chainstate::stacks::address::PoxAddress; +use 
crate::chainstate::stacks::boot::POX_3_NAME; +use crate::chainstate::stacks::StacksPublicKey; +use crate::core::*; +use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; +use crate::util_lib::db::Error as db_error; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::ConsensusHash; pub use stacks_common::types::{Address, PrivateKey, PublicKey}; @@ -39,21 +51,12 @@ use self::bitcoin::Error as btc_error; use self::bitcoin::{ BitcoinBlock, BitcoinInputType, BitcoinTransaction, BitcoinTxInput, BitcoinTxOutput, }; -use crate::chainstate::burn::distribution::BurnSamplePoint; -use crate::chainstate::burn::operations::leader_block_commit::OUTPUTS_PER_COMMIT; -use crate::chainstate::burn::operations::BlockstackOperationType; -use crate::chainstate::burn::operations::Error as op_error; -use crate::chainstate::burn::operations::LeaderKeyRegisterOp; -use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME}; -use crate::chainstate::stacks::StacksPublicKey; use crate::core::*; -use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::types::chainstate::BurnchainHeaderHash; use crate::types::chainstate::PoxId; use crate::types::chainstate::StacksAddress; use crate::types::chainstate::TrieHash; -use crate::util_lib::db::Error as db_error; /// This module contains drivers and types for all burn chains we support. 
pub mod affirmation; @@ -308,6 +311,10 @@ pub struct PoxConstants { /// also defines the burn height at which PoX reward sets are calculated using /// PoX v2 rather than v1 pub v1_unlock_height: u32, + /// The auto unlock height for PoX v2 lockups during Epoch 2.2 + pub v2_unlock_height: u32, + /// After this burn height, reward cycles use pox-3 for reward set data + pub pox_3_activation_height: u32, _shadow: PhantomData<()>, } @@ -321,10 +328,14 @@ impl PoxConstants { sunset_start: u64, sunset_end: u64, v1_unlock_height: u32, + v2_unlock_height: u32, + pox_3_activation_height: u32, ) -> PoxConstants { assert!(anchor_threshold > (prepare_length / 2)); assert!(prepare_length < reward_cycle_length); assert!(sunset_start <= sunset_end); + assert!(v2_unlock_height >= v1_unlock_height); + assert!(pox_3_activation_height >= v2_unlock_height); PoxConstants { reward_cycle_length, @@ -335,18 +346,26 @@ impl PoxConstants { sunset_start, sunset_end, v1_unlock_height, + v2_unlock_height, + pox_3_activation_height, _shadow: PhantomData, } } #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::max_value()) + PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX) } /// Returns the PoX contract that is "active" at the given burn block height - pub fn static_active_pox_contract(v1_unlock_height: u64, burn_height: u64) -> &'static str { - if burn_height > v1_unlock_height { + pub fn static_active_pox_contract( + v1_unlock_height: u64, + pox_3_activation_height: u64, + burn_height: u64, + ) -> &'static str { + if burn_height > pox_3_activation_height { + POX_3_NAME + } else if burn_height > v1_unlock_height { POX_2_NAME } else { POX_1_NAME @@ -355,7 +374,11 @@ impl PoxConstants { /// Returns the PoX contract that is "active" at the given burn block height pub fn active_pox_contract(&self, burn_height: u64) -> &'static str { - 
Self::static_active_pox_contract(self.v1_unlock_height as u64, burn_height) + Self::static_active_pox_contract( + self.v1_unlock_height as u64, + self.pox_3_activation_height as u64, + burn_height, + ) } pub fn reward_slots(&self) -> u32 { @@ -382,6 +405,10 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, + POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, + BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) } @@ -395,6 +422,10 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, + POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, + BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX } @@ -408,6 +439,8 @@ impl PoxConstants { BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, 1_000_000, + 2_000_000, + 3_000_000, ) } diff --git a/src/burnchains/tests/affirmation.rs b/src/burnchains/tests/affirmation.rs index 9110df21a6..a47f7145e6 100644 --- a/src/burnchains/tests/affirmation.rs +++ b/src/burnchains/tests/affirmation.rs @@ -486,8 +486,18 @@ fn test_read_prepare_phase_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -553,8 +563,18 @@ fn test_parent_block_commits() { let first_height = 0; let mut burnchain = 
Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -645,8 +665,18 @@ fn test_filter_orphan_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -706,8 +736,18 @@ fn test_filter_missed_block_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -767,8 +807,18 @@ fn test_find_heaviest_block_commit() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = 
first_timestamp; @@ -980,8 +1030,18 @@ fn test_find_heaviest_parent_commit_many_commits() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1233,8 +1293,18 @@ fn test_update_pox_affirmation_maps_3_forks() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1483,8 +1553,18 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -1676,8 +1756,18 @@ fn test_update_pox_affirmation_maps_absent() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + 
u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2139,8 +2229,18 @@ fn test_update_pox_affirmation_maps_nothing() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(10, 5, 3, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 10, + 5, + 3, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2406,8 +2506,18 @@ fn test_update_pox_affirmation_fork_2_cycles() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 2, 2, 25, 5, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -2698,8 +2808,18 @@ fn test_update_pox_affirmation_fork_duel() { let first_height = 0; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 2, 2, 25, 5, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 2, + 2, + 25, + 5, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; diff --git a/src/burnchains/tests/db.rs b/src/burnchains/tests/db.rs index d29d39325d..6adf317772 100644 --- a/src/burnchains/tests/db.rs +++ b/src/burnchains/tests/db.rs @@ -509,8 +509,18 @@ fn 
test_get_commit_at() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -624,8 +634,18 @@ fn test_get_set_check_anchor_block() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -709,8 +729,18 @@ fn test_update_block_descendancy() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; @@ -828,8 +858,18 @@ fn test_update_block_descendancy_with_fork() { let first_height = 1; let mut burnchain = Burnchain::regtest(":memory:"); - burnchain.pox_constants = - PoxConstants::new(5, 3, 2, 3, 0, u64::MAX - 1, u64::MAX, u32::max_value()); + burnchain.pox_constants = PoxConstants::new( + 5, + 3, + 2, + 3, + 0, + u64::MAX - 1, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); burnchain.first_block_height = first_height; burnchain.first_block_hash = 
first_bhh.clone(); burnchain.first_block_timestamp = first_timestamp; diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index ee1637f40b..98b7b8aafc 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -606,7 +606,7 @@ impl FromRow for StacksEpoch { } } -pub const SORTITION_DB_VERSION: &'static str = "5"; +pub const SORTITION_DB_VERSION: &'static str = "8"; const SORTITION_DB_INITIAL_SCHEMA: &'static [&'static str] = &[ r#" @@ -807,10 +807,21 @@ const SORTITION_DB_SCHEMA_4: &'static [&'static str] = &[ );"#, ]; +/// The changes for version five *just* replace the existing epochs table +/// by deleting all the current entries and inserting the new epochs definition. +const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + +const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + +const SORTITION_DB_SCHEMA_7: &'static [&'static str] = &[r#" + DELETE FROM epochs;"#]; + // update this to add new indexes const LAST_SORTITION_DB_INDEX: &'static str = "index_peg_out_fulfill_burn_header_hash "; -const SORTITION_DB_SCHEMA_5: &'static [&'static str] = &[ +const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ r#" CREATE TABLE peg_in ( txid TEXT NOT NULL, @@ -2813,7 +2824,10 @@ impl SortitionDB { SortitionDB::apply_schema_2(&db_tx, epochs_ref)?; SortitionDB::apply_schema_3(&db_tx)?; SortitionDB::apply_schema_4(&db_tx)?; - SortitionDB::apply_schema_5(&db_tx)?; + SortitionDB::apply_schema_5(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_6(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_7(&db_tx, epochs_ref)?; + SortitionDB::apply_schema_8(&db_tx)?; db_tx.instantiate_index()?; @@ -2970,7 +2984,9 @@ impl SortitionDB { } /// Get the schema version of a sortition DB, given the path to it. - /// Returns the version string, if it exists + /// Returns the version string, if it exists. 
+ /// + /// Does **not** migrate the database (like `open()` or `connect()` would) pub fn get_db_version_from_path(path: &str) -> Result, db_error> { if fs::metadata(path).is_err() { return Err(db_error::NoDBError); @@ -3000,10 +3016,56 @@ impl SortitionDB { match epoch { StacksEpochId::Epoch10 => true, StacksEpochId::Epoch20 => { - version == "1" || version == "2" || version == "3" || version == "4" + version == "1" + || version == "2" + || version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" + } + StacksEpochId::Epoch2_05 => { + version == "2" + || version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" + } + StacksEpochId::Epoch21 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" + } + StacksEpochId::Epoch22 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" + } + StacksEpochId::Epoch23 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" + } + StacksEpochId::Epoch24 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + || version == "8" } - StacksEpochId::Epoch2_05 => version == "2" || version == "3" || version == "4", - StacksEpochId::Epoch21 => version == "5", } } @@ -3072,15 +3134,63 @@ impl SortitionDB { Ok(()) } - fn apply_schema_5(tx: &DBTx) -> Result<(), db_error> { + fn apply_schema_5(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + // the schema 5 changes simply **replace** the contents of the epochs table + // by dropping all the current rows and then revalidating and inserting + // `epochs` for sql_exec in SORTITION_DB_SCHEMA_5 { tx.execute_batch(sql_exec)?; } + SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + tx.execute( "INSERT OR REPLACE INTO 
db_config (version) VALUES (?1)", &["5"], )?; + + Ok(()) + } + + fn apply_schema_6(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_6 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["6"], + )?; + + Ok(()) + } + + fn apply_schema_7(tx: &DBTx, epochs: &[StacksEpoch]) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_7 { + tx.execute_batch(sql_exec)?; + } + + SortitionDB::validate_and_insert_epochs(&tx, epochs)?; + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["7"], + )?; + + Ok(()) + } + + fn apply_schema_8(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in SORTITION_DB_SCHEMA_8 { + tx.execute_batch(sql_exec)?; + } + + tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &["8"], + )?; Ok(()) } @@ -3124,7 +3234,19 @@ impl SortitionDB { tx.commit()?; } else if version == "4" { let tx = self.tx_begin()?; - SortitionDB::apply_schema_5(&tx.deref())?; + SortitionDB::apply_schema_5(&tx.deref(), epochs)?; + tx.commit()?; + } else if version == "5" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_6(&tx.deref(), epochs)?; + tx.commit()?; + } else if version == "6" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_7(&tx.deref(), epochs)?; + tx.commit()?; + } else if version == "7" { + let tx = self.tx_begin()?; + SortitionDB::apply_schema_8(&tx.deref())?; tx.commit()?; } else if version == expected_version { return Ok(()); @@ -9973,7 +10095,18 @@ pub mod tests { fs::create_dir_all(path_root).unwrap(); - let pox_consts = PoxConstants::new(10, 3, 3, 25, 5, u64::MAX, u64::MAX, u32::MAX); + let pox_consts = PoxConstants::new( + 10, + 3, + 3, + 25, + 5, + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); let mut burnchain = Burnchain::regtest(path_root); burnchain.pox_constants = pox_consts.clone(); diff 
--git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index a9f8c58bee..232a268bf3 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -43,6 +43,9 @@ use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; use crate::codec::{write_next, Error as codec_error, StacksMessageCodec}; +use crate::core::STACKS_EPOCH_2_2_MARKER; +use crate::core::STACKS_EPOCH_2_3_MARKER; +use crate::core::STACKS_EPOCH_2_4_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER}; use crate::net::Error as net_error; @@ -754,6 +757,9 @@ impl LeaderBlockCommitOp { } StacksEpochId::Epoch2_05 => self.check_epoch_commit_marker(STACKS_EPOCH_2_05_MARKER), StacksEpochId::Epoch21 => self.check_epoch_commit_marker(STACKS_EPOCH_2_1_MARKER), + StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), + StacksEpochId::Epoch23 => self.check_epoch_commit_marker(STACKS_EPOCH_2_3_MARKER), + StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), } } @@ -768,7 +774,10 @@ impl LeaderBlockCommitOp { ) -> Result { let tx_tip = tx.context.chain_tip.clone(); let intended_sortition = match epoch_id { - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self .block_height @@ -1765,7 +1774,18 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), 
peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2298,7 +2318,18 @@ mod tests { ]; let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), @@ -2988,7 +3019,18 @@ mod tests { .unwrap(); let burnchain = Burnchain { - pox_constants: PoxConstants::new(6, 2, 2, 25, 5, 5000, 10000, u32::max_value()), + pox_constants: PoxConstants::new( + 6, + 2, + 2, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + ), peer_version: 0x012345678, network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index a4fe23414c..f939ed2c08 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -54,6 +54,7 @@ use crate::chainstate::coordinator::comm::{ ArcCounterCoordinatorNotices, CoordinatorEvents, CoordinatorNotices, CoordinatorReceivers, }; use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::POX_3_NAME; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::{ @@ -242,7 +243,7 @@ impl From for Error { pub trait RewardSetProvider { fn get_reward_set( &self, - current_burn_height: u64, + cycle_start_burn_height: u64, chainstate: &mut StacksChainState, burnchain: &Burnchain, sortdb: &SortitionDB, @@ -255,12 +256,44 @@ pub struct OnChainRewardSetProvider(); impl RewardSetProvider for OnChainRewardSetProvider { fn get_reward_set( &self, + // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` current_burn_height: u64, chainstate: &mut StacksChainState, burnchain: &Burnchain, sortdb: &SortitionDB, block_id: 
&StacksBlockId, ) -> Result { + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( + &format!("FATAL: no epoch for burn height {}", current_burn_height), + ); + match cur_epoch.epoch_id { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 => { + // Epochs 1.0 - 2.1 compute reward sets + } + StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => { + info!("PoX reward cycle defaulting to burn in Epochs 2.2 and 2.3"); + return Ok(RewardSet::empty()); + } + StacksEpochId::Epoch24 => { + // Epoch 2.4 computes reward sets, but *only* if PoX-3 is active + if burnchain + .pox_constants + .active_pox_contract(current_burn_height) + != POX_3_NAME + { + // Note: this should not happen in mainnet or testnet, because the no reward cycle start height + // exists between Epoch 2.4's instantiation height and the pox-3 activation height. + // However, this *will* happen in testing if Epoch 2.4's instantiation height is set == a reward cycle + // start height + info!("PoX reward cycle defaulting to burn in Epoch 2.4 because cycle start is before PoX-3 activation"); + return Ok(RewardSet::empty()); + } + } + }; + let registered_addrs = chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; @@ -291,10 +324,6 @@ impl RewardSetProvider for OnChainRewardSetProvider { "registered_addrs" => registered_addrs.len()); } - let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( - &format!("FATAL: no epoch for burn height {}", current_burn_height), - ); - Ok(StacksChainState::make_reward_set( threshold, registered_addrs, @@ -2983,8 +3012,11 @@ impl< return Ok(Some(pox_anchor)); } } - StacksEpochId::Epoch21 => { - // 2.1 behavior: the anchor block must also be the + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { + // 2.1 and onward behavior: the anchor block must also be the // 
heaviest-confirmed anchor block by BTC weight, and the highest // such anchor block if there are multiple contenders. if let Some(pox_anchor) = diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index f582429193..7557156e83 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -64,10 +64,10 @@ use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; use crate::chainstate::coordinator::{Error as CoordError, *}; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; -use crate::chainstate::stacks::boot::PoxStartCycleInfo; use crate::chainstate::stacks::boot::COSTS_2_NAME; use crate::chainstate::stacks::boot::POX_1_NAME; use crate::chainstate::stacks::boot::POX_2_NAME; +use crate::chainstate::stacks::boot::{PoxStartCycleInfo, POX_3_NAME}; use crate::chainstate::stacks::db::{ accounts::MinerReward, ClarityTx, StacksChainState, StacksHeaderInfo, }; @@ -513,9 +513,11 @@ pub fn get_burnchain(path: &str, pox_consts: Option) -> Burnchain 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, ) }); b @@ -658,7 +660,7 @@ fn make_genesis_block_with_recipients( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -921,7 +923,7 @@ fn make_stacks_block_with_input( ), key_block_ptr: 1, // all registers happen in block height 1 key_vtxindex: (1 + key_index) as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: VRFSeed::from_proof(&proof), commit_outs, @@ -952,7 +954,9 @@ fn missed_block_commits_2_05() { 5, 7010, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1268,7 +1272,9 
@@ fn missed_block_commits_2_1() { 5, 7010, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1608,7 +1614,9 @@ fn late_block_commits_2_1() { 5, 7010, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2662,7 +2670,8 @@ fn test_pox_btc_ops() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_v1_unlock_ht = u32::max_value(); + let pox_v1_unlock_ht = u32::MAX; + let pox_v2_unlock_ht = u32::MAX; let pox_consts = Some(PoxConstants::new( 5, 3, @@ -2672,6 +2681,8 @@ fn test_pox_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2849,8 +2860,11 @@ fn test_pox_btc_ops() { assert_eq!(stacker_balance.amount_locked(), stacked_amt); } else { assert_eq!( - stacker_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + stacker_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), balance as u128, "No lock should be active" ); @@ -2938,7 +2952,8 @@ fn test_stx_transfer_btc_ops() { let path = "/tmp/stacks-blockchain-stx_transfer-btc-ops"; let _r = std::fs::remove_dir_all(path); - let pox_v1_unlock_ht = u32::max_value(); + let pox_v1_unlock_ht = u32::MAX; + let pox_v2_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 5, @@ -2949,6 +2964,8 @@ fn test_stx_transfer_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3148,26 +3165,38 @@ fn test_stx_transfer_btc_ops() { if ix > 2 { assert_eq!( - sender_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + sender_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, 
+ pox_v2_unlock_ht + ), (balance as u128) - transfer_amt, "Transfer should have decremented balance" ); assert_eq!( - recipient_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + recipient_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), transfer_amt, "Recipient should have incremented balance" ); } else { assert_eq!( - sender_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + sender_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), balance as u128, ); assert_eq!( - recipient_balance - .get_available_balance_at_burn_block(burn_height as u64, pox_v1_unlock_ht), + recipient_balance.get_available_balance_at_burn_block( + burn_height as u64, + pox_v1_unlock_ht, + pox_v2_unlock_ht + ), 0, ); } @@ -3255,6 +3284,7 @@ fn test_sbtc_ops() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3265,6 +3295,8 @@ fn test_sbtc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3597,6 +3629,7 @@ fn test_delegate_stx_btc_ops() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; + let pox_v2_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3607,6 +3640,8 @@ fn test_delegate_stx_btc_ops() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3909,7 +3944,9 @@ fn test_initial_coinbase_reward_distributions() { 5, 7010, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4146,7 +4183,9 @@ fn test_epoch_switch_cost_contract_instantiation() { 5, 
10, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4332,28 +4371,34 @@ fn test_epoch_switch_cost_contract_instantiation() { // and that the epoch transition is only applied once. If it were to be applied more than once, // the test would panic when trying to re-create the pox-2 contract. #[test] -fn test_epoch_switch_pox_contract_instantiation() { - let path = "/tmp/stacks-blockchain-epoch-switch-pox-contract-instantiation"; +fn test_epoch_switch_pox_2_contract_instantiation() { + let path = "/tmp/stacks-blockchain-epoch-switch-pox-2-contract-instantiation"; let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10)); + let pox_consts = Some(PoxConstants::new( + 6, + 3, + 3, + 25, + 5, + 10, + sunset_ht, + 10, + u32::MAX, + u32::MAX, + )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..15).map(|_| VRFPrivateKey::new()).collect(); let committers: Vec<_> = (0..15).map(|_| StacksPrivateKey::new()).collect(); - let stacker = p2pkh_from(&StacksPrivateKey::new()); - let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - let initial_balances = vec![(stacker.clone().into(), balance)]; - setup_states( &[path], &vrf_keys, &committers, pox_consts.clone(), - Some(initial_balances), + None, StacksEpochId::Epoch21, ); @@ -4385,7 +4430,7 @@ fn test_epoch_switch_pox_contract_instantiation() { let mut burnchain = get_burnchain_db(path, pox_consts.clone()); let mut chainstate = get_chainstate(path); - // Want to ensure that the pox-2 contract DNE for all blocks after the epoch transition height, + // Want to ensure that the pox-2 contract DNE for all blocks before the epoch transition height, // and does exist for blocks after the boundary. 
// Epoch 2.1 transition // ^ @@ -4405,16 +4450,6 @@ fn test_epoch_switch_pox_contract_instantiation() { let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); let b = get_burnchain(path, pox_consts.clone()); - let next_mock_header = BurnchainBlockHeader { - block_height: burnchain_tip.block_height + 1, - block_hash: BurnchainHeaderHash([0; 32]), - parent_block_hash: burnchain_tip.block_hash, - num_txs: 0, - timestamp: 1, - }; - - let reward_cycle_info = coord.get_reward_cycle_info(&next_mock_header).unwrap(); - let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -4444,7 +4479,6 @@ fn test_epoch_switch_pox_contract_instantiation() { let expected_winner = good_op.txid(); let ops = vec![good_op]; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); produce_burn_block( &b, &mut burnchain, @@ -4537,6 +4571,203 @@ fn test_epoch_switch_pox_contract_instantiation() { } } +// This test ensures the epoch transition from 2.3 to 2.4 is applied at the proper block boundaries, +// and that the epoch transition is only applied once. If it were to be applied more than once, +// the test would panic when trying to re-create the pox-3 contract. 
+#[test] +fn test_epoch_switch_pox_3_contract_instantiation() { + let path = "/tmp/stacks-blockchain-epoch-switch-pox-3-contract-instantiation"; + let _r = std::fs::remove_dir_all(path); + + let sunset_ht = 8000; + let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, 16)); + let burnchain_conf = get_burnchain(path, pox_consts.clone()); + + let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); + let committers: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); + + setup_states( + &[path], + &vrf_keys, + &committers, + pox_consts.clone(), + None, + StacksEpochId::Epoch24, + ); + + let mut coord = make_coordinator(path, Some(burnchain_conf)); + + coord.handle_new_burnchain_block().unwrap(); + + let sort_db = get_sortition_db(path, pox_consts.clone()); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + assert_eq!(tip.block_height, 1); + assert_eq!(tip.sortition, false); + let (_, ops) = sort_db + .get_sortition_result(&tip.sortition_id) + .unwrap() + .unwrap(); + + // we should have all the VRF registrations accepted + assert_eq!(ops.accepted_ops.len(), vrf_keys.len()); + assert_eq!(ops.consumed_leader_keys.len(), 0); + + // process sequential blocks, and their sortitions... + let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; + + for ix in 0..24 { + let vrf_key = &vrf_keys[ix]; + let miner = &committers[ix]; + + let mut burnchain = get_burnchain_db(path, pox_consts.clone()); + let mut chainstate = get_chainstate(path); + + // Want to ensure that the pox-3 contract DNE for all blocks before the epoch 2.4 transition height, + // and does exist for blocks after the boundary. + // Epoch 2.1 transition Epoch 2.2 transition Epoch 2.3 transition Epoch 2.4 transition + // ^ ^ ^ ^ + //.. -> B6 -> B7 -> B8 -> B9 -> B10 -> B11 -> B12 -> B13 -> B14 -> B15 -> B16 -> B17 -> B18 -> B19 + //.. 
-> S5 -> S6 -> S7 -> S8 -> S9 -> S10 -> S11 -> S12 -> S13 -> S14 -> S15 -> S16 -> S17 -> S18 + // \ + // \ + // _ _ _ S19 -> S20 -> .. + let parent = if ix == 0 { + BlockHeaderHash([0; 32]) + } else if ix == 15 { + stacks_blocks[ix - 2].1.header.block_hash() + } else { + stacks_blocks[ix - 1].1.header.block_hash() + }; + + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let b = get_burnchain(path, pox_consts.clone()); + + let (good_op, block) = if ix == 0 { + make_genesis_block_with_recipients( + &sort_db, + &mut chainstate, + &parent, + miner, + 10000, + vrf_key, + ix as u32, + None, + ) + } else { + make_stacks_block_with_recipients( + &sort_db, + &mut chainstate, + &b, + &parent, + burnchain_tip.block_height, + miner, + 1000, + vrf_key, + ix as u32, + None, + ) + }; + + let expected_winner = good_op.txid(); + let ops = vec![good_op]; + + produce_burn_block( + &b, + &mut burnchain, + &burnchain_tip.block_hash, + ops, + vec![].iter_mut(), + ); + // handle the sortition + coord.handle_new_burnchain_block().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + assert_eq!(&tip.winning_block_txid, &expected_winner); + + // load the block into staging + let block_hash = block.header.block_hash(); + + assert_eq!(&tip.winning_stacks_block_hash, &block_hash); + stacks_blocks.push((tip.sortition_id.clone(), block.clone())); + + preprocess_block(&mut chainstate, &sort_db, &tip, block); + + // handle the stacks block + coord.handle_new_stacks_block().unwrap(); + + let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); + let burn_block_height = tip.block_height; + + // check that the expected stacks epoch ID is equal to the actual stacks epoch ID + let expected_epoch = match burn_block_height { + x if x < 4 => StacksEpochId::Epoch20, + x if x >= 4 && x < 8 => StacksEpochId::Epoch2_05, + x if x >= 8 && x < 12 => StacksEpochId::Epoch21, + x if x >= 12 && x < 16 => 
StacksEpochId::Epoch22, + x if x >= 16 && x < 20 => StacksEpochId::Epoch23, + _ => StacksEpochId::Epoch24, + }; + assert_eq!( + chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| conn.with_clarity_db_readonly(|db| db + .get_stacks_epoch(burn_block_height as u32) + .unwrap()) + ) + .unwrap() + .epoch_id, + expected_epoch + ); + + // These expectations are according to hard-coded values in + // `StacksEpoch::unit_test_2_4`. + let expected_runtime = match burn_block_height { + x if x < 4 => u64::MAX, + x if x >= 4 && x < 8 => 205205, + x => 210210, + }; + assert_eq!( + chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| { + conn.with_clarity_db_readonly(|db| { + db.get_stacks_epoch(burn_block_height as u32).unwrap() + }) + }, + ) + .unwrap() + .block_limit + .runtime, + expected_runtime + ); + + // check that pox-3 contract DNE before epoch 2.4, and that it does exist after + let does_pox_3_contract_exist = chainstate + .with_read_only_clarity_tx( + &sort_db.index_conn(), + &StacksBlockId::new(&stacks_tip.0, &stacks_tip.1), + |conn| { + conn.with_clarity_db_readonly(|db| { + db.get_contract(&boot_code_id(POX_3_NAME, false)) + }) + }, + ) + .unwrap(); + + if burn_block_height < 20 { + assert!(does_pox_3_contract_exist.is_err()) + } else { + assert!(does_pox_3_contract_exist.is_ok()) + } + } +} + +#[cfg(test)] fn get_total_stacked_info( chainstate: &mut StacksChainState, burn_dbconn: &dyn BurnStateDB, @@ -4580,6 +4811,7 @@ fn test_epoch_verify_active_pox_contract() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 6, @@ -4590,6 +4822,8 @@ fn test_epoch_verify_active_pox_contract() { 7010, sunset_ht, pox_v1_unlock_ht, + pox_v2_unlock_ht, + u32::MAX, )); let burnchain_conf =
get_burnchain(path, pox_consts.clone()); @@ -4878,7 +5112,9 @@ fn test_sortition_with_sunset() { 5, 10, sunset_ht, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5186,6 +5422,8 @@ fn test_sortition_with_sunset_and_epoch_switch() { 10, sunset_ht, v1_unlock_ht, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5533,7 +5771,9 @@ fn test_pox_processable_block_in_different_pox_forks() { 5, u64::MAX - 1, u64::MAX, - u32::max_value(), + u32::MAX, + u32::MAX, + u32::MAX, )); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); diff --git a/src/chainstate/stacks/boot/contract_tests.rs b/src/chainstate/stacks/boot/contract_tests.rs index 4ec001d80b..141f7b6e18 100644 --- a/src/chainstate/stacks/boot/contract_tests.rs +++ b/src/chainstate/stacks/boot/contract_tests.rs @@ -96,7 +96,7 @@ lazy_static! { pub struct ClarityTestSim { marf: MarfedKV, - height: u64, + pub height: u64, fork: u64, /// This vec specifies the transitions for each epoch. 
/// It is a list of heights at which the simulated chain transitions @@ -378,6 +378,8 @@ impl BurnStateDB for TestSimBurnStateDB { 0 => StacksEpochId::Epoch20, 1 => StacksEpochId::Epoch2_05, 2 => StacksEpochId::Epoch21, + 3 => StacksEpochId::Epoch22, + 4 => StacksEpochId::Epoch23, _ => panic!("Epoch unknown"), }; @@ -399,7 +401,15 @@ impl BurnStateDB for TestSimBurnStateDB { } fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index a99a3dcb04..426fdf0b99 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -22,6 +22,7 @@ use std::convert::TryInto; use clarity::codec::StacksMessageCodec; use clarity::types::chainstate::BlockHeaderHash; use clarity::util::hash::to_hex; +use clarity::vm::analysis::CheckErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::Error as ClarityError; use clarity::vm::clarity::TransactionConnection; @@ -31,6 +32,7 @@ use clarity::vm::costs::{ }; use clarity::vm::database::ClarityDatabase; use clarity::vm::database::{NULL_BURN_STATE_DB, NULL_HEADER_DB}; +use clarity::vm::errors::Error as VmError; use clarity::vm::errors::InterpreterError; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::representations::ClarityName; @@ -81,10 +83,10 @@ pub const BOOT_CODE_BNS: &'static str = std::include_str!("bns.clar"); pub const BOOT_CODE_GENESIS: &'static str = std::include_str!("genesis.clar"); pub const POX_1_NAME: &'static str = "pox"; pub const POX_2_NAME: &'static str = "pox-2"; +pub const POX_3_NAME: &'static str = "pox-3"; -const POX_2_TESTNET_CONSTS: &'static str = std::include_str!("pox-testnet.clar"); -const POX_2_MAINNET_CONSTS: &'static str = std::include_str!("pox-mainnet.clar"); const POX_2_BODY: &'static str 
= std::include_str!("pox-2.clar"); +const POX_3_BODY: &'static str = std::include_str!("pox-3.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; @@ -101,6 +103,10 @@ lazy_static! { format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_2_BODY); pub static ref POX_2_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_2_BODY); + pub static ref POX_3_MAINNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); + pub static ref POX_3_TESTNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), @@ -141,7 +147,7 @@ pub fn make_contract_id(addr: &StacksAddress, name: &str) -> QualifiedContractId ) } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct RawRewardSetEntry { pub reward_address: PoxAddress, pub amount_stacked: u128, @@ -220,11 +226,12 @@ impl StacksChainState { fn get_user_stacking_state( clarity: &mut ClarityTransactionConnection, principal: &PrincipalData, + pox_contract_name: &str, ) -> TupleData { // query the stacking state for this user before deleting it let is_mainnet = clarity.is_mainnet(); let sender_addr = PrincipalData::from(boot::boot_code_addr(clarity.is_mainnet())); - let pox_contract = boot::boot_code_id(POX_2_NAME, clarity.is_mainnet()); + let pox_contract = boot::boot_code_id(pox_contract_name, clarity.is_mainnet()); let user_stacking_state = clarity .with_readonly_clarity_env( is_mainnet, @@ -322,14 +329,45 @@ impl StacksChainState { /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. - pub fn handle_pox_cycle_start( + /// + /// This should only be called for PoX v2 cycles. 
+ pub fn handle_pox_cycle_start_pox_2( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_2_NAME) + } + + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + /// This should only be called for PoX v3 cycles. + pub fn handle_pox_cycle_start_pox_3( clarity: &mut ClarityTransactionConnection, cycle_number: u64, cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) + } + + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + fn handle_pox_cycle_start( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + pox_contract_name: &str, ) -> Result, Error> { clarity.with_clarity_db(|db| Ok(Self::mark_pox_cycle_handled(db, cycle_number)))?; - debug!("Handling PoX reward cycle start"; "reward_cycle" => cycle_number, "cycle_active" => cycle_info.is_some()); + debug!( + "Handling PoX reward cycle start"; + "reward_cycle" => cycle_number, + "cycle_active" => cycle_info.is_some(), + "pox_contract" => pox_contract_name + ); let cycle_info = match cycle_info { Some(x) => x, @@ -337,7 +375,7 @@ impl StacksChainState { }; let sender_addr = PrincipalData::from(boot::boot_code_addr(clarity.is_mainnet())); - let pox_contract = boot::boot_code_id(POX_2_NAME, clarity.is_mainnet()); + let pox_contract = boot::boot_code_id(pox_contract_name, clarity.is_mainnet()); let mut total_events = vec![]; for (principal, amount_locked) in cycle_info.missed_reward_slots.iter() { @@ -362,7 +400,7 @@ impl StacksChainState { }).expect("FATAL: failed to accelerate PoX unlock"); // query the stacking state for this user before deleting it 
- let user_data = Self::get_user_stacking_state(clarity, principal); + let user_data = Self::get_user_stacking_state(clarity, principal, pox_contract_name); // perform the unlock let (result, _, mut events, _) = clarity @@ -843,6 +881,95 @@ impl StacksChainState { Ok(ret) } + fn get_reward_addresses_pox_3( + &mut self, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + reward_cycle: u64, + ) -> Result, Error> { + if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_3_NAME)? { + debug!( + "PoX was voted disabled in block {} (reward cycle {})", + block_id, reward_cycle + ); + return Ok(vec![]); + } + + // how many in this cycle? + let num_addrs = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_3_NAME, + &format!("(get-reward-set-size u{})", reward_cycle), + )? + .expect_u128(); + + debug!( + "At block {:?} (reward cycle {}): {} PoX reward addresses", + block_id, reward_cycle, num_addrs + ); + + let mut ret = vec![]; + for i in 0..num_addrs { + // value should be (optional (tuple (pox-addr (tuple (...))) (total-ustx uint))). + let tuple = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_3_NAME, + &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), + )? 
+ .expect_optional() + .expect(&format!( + "FATAL: missing PoX address in slot {} out of {} in reward cycle {}", + i, num_addrs, reward_cycle + )) + .expect_tuple(); + + let pox_addr_tuple = tuple + .get("pox-addr") + .expect(&format!("FATAL: no `pox-addr` in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned(); + + let reward_address = PoxAddress::try_from_pox_tuple(self.mainnet, &pox_addr_tuple) + .expect(&format!( + "FATAL: not a valid PoX address: {:?}", + &pox_addr_tuple + )); + + let total_ustx = tuple + .get("total-ustx") + .expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned() + .expect_u128(); + + let stacker = tuple + .get("stacker") + .expect(&format!( + "FATAL: no 'stacker' in return value from (get-reward-set-pox-address u{} u{})", + reward_cycle, i + )) + .to_owned() + .expect_optional() + .map(|value| value.expect_principal()); + + debug!( + "Parsed PoX reward address"; + "stacked_ustx" => total_ustx, + "reward_address" => %reward_address, + "stacker" => ?stacker, + ); + ret.push(RawRewardSetEntry { + reward_address, + amount_stacked: total_ustx, + stacker, + }) + } + + Ok(ret) + } + /// Get the sequence of reward addresses, as well as the PoX-specified hash mode (which gets /// lost in the conversion to StacksAddress) /// Each address will have at least (get-stacking-minimum) tokens. @@ -863,13 +990,26 @@ impl StacksChainState { .pox_constants .active_pox_contract(reward_cycle_start_height); - match pox_contract_name { + let result = match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), + x if x == POX_3_NAME => self.get_reward_addresses_pox_3(sortdb, block_id, reward_cycle), unknown_contract => { panic!("Blockchain implementation failure: PoX contract name '{}' is unknown. 
Chainstate is corrupted.", unknown_contract); } + }; + + // Catch the epoch boundary edge case where burn height >= pox 3 activation height, but + // there hasn't yet been a Stacks block. + match result { + Err(Error::ClarityError(ClarityError::Interpreter(VmError::Unchecked( + CheckErrors::NoSuchContract(_), + )))) => { + warn!("Reward cycle attempted to calculate rewards before the PoX contract was instantiated"); + return Ok(vec![]); + } + x => x, } } } @@ -878,6 +1018,8 @@ impl StacksChainState { pub mod contract_tests; #[cfg(test)] pub mod pox_2_tests; +#[cfg(test)] +pub mod pox_3_tests; #[cfg(test)] pub mod test { @@ -969,7 +1111,8 @@ pub mod test { #[test] fn get_reward_threshold_units() { - let test_pox_constants = PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::max_value()); + let test_pox_constants = + PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX); // when the liquid amount = the threshold step, // the threshold should always be the step size. 
let liquid = POX_THRESHOLD_STEPS_USTX; @@ -1335,6 +1478,29 @@ pub mod test { addr: PoxAddress, lock_period: u128, burn_ht: u64, + ) -> StacksTransaction { + make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_2_NAME) + } + + pub fn make_pox_3_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + ) -> StacksTransaction { + make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_3_NAME) + } + + pub fn make_pox_2_or_3_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + contract_name: &str, ) -> StacksTransaction { // (define-public (stack-stx (amount-ustx uint) // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) @@ -1343,7 +1509,7 @@ pub mod test { let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); let payload = TransactionPayload::new_contract_call( boot_code_test_addr(), - POX_2_NAME, + contract_name, "stack-stx", vec![ Value::UInt(amount), @@ -1391,6 +1557,24 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_3_extend( + key: &StacksPrivateKey, + nonce: u64, + addr: PoxAddress, + lock_period: u128, + ) -> StacksTransaction { + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + POX_3_NAME, + "stack-extend", + vec![Value::UInt(lock_period), addr_tuple], + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } + fn make_tx( key: &StacksPrivateKey, nonce: u64, @@ -1444,6 +1628,23 @@ pub mod test { make_tx(key, nonce, 0, payload) } + pub fn make_pox_3_contract_call( + key: &StacksPrivateKey, + nonce: u64, + function_name: &str, + args: Vec, + ) -> StacksTransaction { + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + POX_3_NAME, + function_name, + args, + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } + // make a 
stream of invalid pox-lockup transactions fn make_invalid_pox_lockups(key: &StacksPrivateKey, mut nonce: u64) -> Vec { let mut ret = vec![]; diff --git a/src/chainstate/stacks/boot/pox-2.clar b/src/chainstate/stacks/boot/pox-2.clar index e07addb450..8e56084a74 100644 --- a/src/chainstate/stacks/boot/pox-2.clar +++ b/src/chainstate/stacks/boot/pox-2.clar @@ -346,7 +346,7 @@ (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) (map-set stacking-state { stacker: moved-stacker } (merge moved-state { reward-set-indexes: update-list }))) - ;; otherwise, we dont need to update stacking-state after move + ;; otherwise, we don't need to update stacking-state after move true)) ;; if not moving, just noop true) @@ -636,7 +636,7 @@ ;; to issue the stacking lock. ;; The caller specifies: ;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock -;; * until-burn-ht: an optional burn height at which this delegation expiration +;; * until-burn-ht: an optional burn height at which this delegation expires ;; * pox-addr: an optional address to which any rewards *must* be sent (define-public (delegate-stx (amount-ustx uint) (delegate-to principal) @@ -765,7 +765,7 @@ (let ((amount-ustx (get stacked-amount partial-stacked)) ;; reward-cycle must point to an existing record in reward-cycle-total-stacked - ;; infallable; getting something from partial-stacked-by-cycle succeeded so this must succeed + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) @@ -1279,7 +1279,7 @@ (map-get? 
partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) ) -;; How any uSTX have voted to reject PoX in a given reward cycle? +;; How many uSTX have voted to reject PoX in a given reward cycle? ;; *New in Stacks 2.1* (define-read-only (get-total-pox-rejection (reward-cycle uint)) (match (map-get? stacking-rejection { reward-cycle: reward-cycle }) diff --git a/src/chainstate/stacks/boot/pox-3.clar b/src/chainstate/stacks/boot/pox-3.clar new file mode 100644 index 0000000000..5878038a0b --- /dev/null +++ b/src/chainstate/stacks/boot/pox-3.clar @@ -0,0 +1,1320 @@ +;; The .pox-3 contract +;; Error codes +(define-constant ERR_STACKING_UNREACHABLE 255) +(define-constant ERR_STACKING_CORRUPTED_STATE 254) +(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1) +(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2) +(define-constant ERR_STACKING_ALREADY_STACKED 3) +(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4) +(define-constant ERR_STACKING_EXPIRED 5) +(define-constant ERR_STACKING_STX_LOCKED 6) +(define-constant ERR_STACKING_PERMISSION_DENIED 9) +(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11) +(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12) +(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13) +(define-constant ERR_STACKING_ALREADY_REJECTED 17) +(define-constant ERR_STACKING_INVALID_AMOUNT 18) +(define-constant ERR_NOT_ALLOWED 19) +(define-constant ERR_STACKING_ALREADY_DELEGATED 20) +(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) +(define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) +(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) +(define-constant ERR_INVALID_START_BURN_HEIGHT 24) +(define-constant ERR_NOT_CURRENT_STACKER 25) +(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) +(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) +(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) +(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) +(define-constant ERR_STACKING_IS_DELEGATED 30) +(define-constant 
ERR_STACKING_NOT_DELEGATED 31) + +;; PoX disabling threshold (a percent) +(define-constant POX_REJECTION_FRACTION u25) + +;; Valid values for burnchain address versions. +;; These first four correspond to address hash modes in Stacks 2.1, +;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they +;; cannot be defined here again). +;; (define-constant ADDRESS_VERSION_P2PKH 0x00) +;; (define-constant ADDRESS_VERSION_P2SH 0x01) +;; (define-constant ADDRESS_VERSION_P2WPKH 0x02) +;; (define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. 
+(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var pox-rejection-fraction uint POX_REJECTION_FRACTION) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-2-1-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (rejection-fraction uint) + (begin-2-1-reward-cycle uint)) + (begin + (asserts! (not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set pox-rejection-fraction rejection-fraction) + (var-set first-2-1-reward-cycle begin-2-1-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-3` contract tries to keep this state in sync with the reward-cycle +;; state. 
The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal) + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? 
+ delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. +(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? +;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. +;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). 
;; Write-only audit log of partial-stacked-by-cycle entries (see the comment
;; above): keyed identically, and overwritten by subsequent aggregation calls.
(define-map logged-partial-stacked-by-cycle
    {
        pox-addr: { version: (buff 1), hashbytes: (buff 32) },
        reward-cycle: uint,
        sender: principal
    }
    { stacked-amount: uint }
)

;; Amount of uSTX that reject PoX, by reward cycle
(define-map stacking-rejection
    { reward-cycle: uint }
    { amount: uint }
)

;; Who rejected in which reward cycle (one entry per stacker per cycle)
(define-map stacking-rejectors
    { stacker: principal, reward-cycle: uint }
    { amount: uint }
)

;; Getter for stacking-rejectors.
;; Returns (some { amount: ... }) if `stacker` rejected PoX in `reward-cycle`,
;; and none otherwise.
(define-read-only (get-pox-rejection (stacker principal) (reward-cycle uint))
    (map-get? stacking-rejectors { stacker: stacker, reward-cycle: reward-cycle }))

;; Has PoX been rejected in the given reward cycle?
;; Returns true while the rejection votes are strictly below the configured
;; fraction of the liquid supply (i.e. true means PoX is still active).
(define-read-only (is-pox-active (reward-cycle uint))
    (let (
        ;; total uSTX voted for rejection this cycle; u0 if nobody voted
        (reject-votes
            (default-to
                u0
                (get amount (map-get? stacking-rejection { reward-cycle: reward-cycle }))))
    )
    ;; equivalent to: (100 * reject-votes) / stx-liquid-supply < pox-rejection-fraction,
    ;; cross-multiplied to avoid integer-division truncation
    (< (* u100 reject-votes)
       (* (var-get pox-rejection-fraction) stx-liquid-supply)))
)

;; What's the reward cycle number of the burnchain block height?
;; Will runtime-abort if height is less than the first burnchain block (this
;; is intentional: the uint subtraction underflows and aborts the transaction).
(define-read-only (burn-height-to-reward-cycle (height uint))
    (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length)))

;; What's the block height at the start of a given reward cycle?
;; Inverse of burn-height-to-reward-cycle (up to truncation).
(define-read-only (reward-cycle-to-burn-height (cycle uint))
    (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length))))

;; What's the current PoX reward cycle, derived from burn-block-height?
(define-read-only (current-pox-reward-cycle)
    (burn-height-to-reward-cycle burn-block-height))

;; Get the _current_ PoX stacking principal information.  If the information
;; is expired (lock period has elapsed), or if there's never been such a
;; stacker, then returns none.
(define-read-only (get-stacker-info (stacker principal))
    (match (map-get?
stacking-state { stacker: stacker }) + stacking-info + (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle)) + ;; present, but lock has expired + none + ;; present, and lock has not expired + (some stacking-info) + ) + ;; no state at all + none + )) + +(define-read-only (check-caller-allowed) + (or (is-eq tx-sender contract-caller) + (let ((caller-allowed + ;; if not in the caller map, return false + (unwrap! (map-get? allowance-contract-callers + { sender: tx-sender, contract-caller: contract-caller }) + false)) + (expires-at + ;; if until-burn-ht not set, then return true (because no expiry) + (unwrap! (get until-burn-ht caller-allowed) true))) + ;; is the caller allowance expired? + (if (>= burn-block-height expires-at) + false + true)))) + +(define-read-only (get-check-delegation (stacker principal)) + (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker })))) + ;; did the existing delegation expire? + (if (match (get until-burn-ht delegation-info) + until-burn-ht (> burn-block-height until-burn-ht) + false) + ;; it expired, return none + none + ;; delegation is active + (some delegation-info)))) + +;; Get the size of the reward set for a reward cycle. +;; Note that this does _not_ return duplicate PoX addresses. +;; Note that this also _will_ return PoX addresses that are beneath +;; the minimum threshold -- i.e. the threshold can increase after insertion. +;; Used internally by the Stacks node, which filters out the entries +;; in this map to select PoX addresses with enough STX. +(define-read-only (get-reward-set-size (reward-cycle uint)) + (default-to + u0 + (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle })))) + +;; How many rejection votes have we been accumulating for the next block +(define-read-only (next-cycle-rejection-votes) + (default-to + u0 + (get amount (map-get? 
stacking-rejection { reward-cycle: (+ u1 (current-pox-reward-cycle)) })))) + +;; Add a single PoX address to a single reward cycle. +;; Used to build up a set of per-reward-cycle PoX addresses. +;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle! +;; Returns the index into the reward cycle that the PoX address is stored to +(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-cycle uint) + (amount-ustx uint) + (stacker (optional principal))) + (let ((sz (get-reward-set-size reward-cycle))) + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: sz } + { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker }) + (map-set reward-cycle-pox-address-list-len + { reward-cycle: reward-cycle } + { len: (+ u1 sz) }) + sz)) + +;; How many uSTX are stacked? +(define-read-only (get-total-ustx-stacked (reward-cycle uint)) + (default-to + u0 + (get total-ustx (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) +) + +;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle. +;; Returns (optional (tuple (pox-addr ) (total-ustx ))) +(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) + (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) + +(define-private (fold-unlock-reward-cycle (set-index uint) + (data-res (response { cycle: uint, + first-unlocked-cycle: uint, + stacker: principal + } int))) + (let ((data (try! data-res)) + (cycle (get cycle data)) + (first-unlocked-cycle (get first-unlocked-cycle data))) + ;; if current-cycle hasn't reached first-unlocked-cycle, just continue to next iter + (asserts! (>= cycle first-unlocked-cycle) (ok (merge data { cycle: (+ u1 cycle) }))) + (let ((cycle-entry (unwrap-panic (map-get? 
reward-cycle-pox-address-list { reward-cycle: cycle, index: set-index }))) + (cycle-entry-u (get stacker cycle-entry)) + (cycle-entry-total-ustx (get total-ustx cycle-entry)) + (cycle-last-entry-ix (- (get len (unwrap-panic (map-get? reward-cycle-pox-address-list-len { reward-cycle: cycle }))) u1))) + (asserts! (is-eq cycle-entry-u (some (get stacker data))) (err ERR_STACKING_CORRUPTED_STATE)) + (if (not (is-eq cycle-last-entry-ix set-index)) + ;; do a "move" if the entry to remove isn't last + (let ((move-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix })))) + (map-set reward-cycle-pox-address-list + { reward-cycle: cycle, index: set-index } + move-entry) + (match (get stacker move-entry) moved-stacker + ;; if the moved entry had an associated stacker, update its state + (let ((moved-state (unwrap-panic (map-get? stacking-state { stacker: moved-stacker }))) + ;; calculate the index into the reward-set-indexes that `cycle` is at + (moved-cycle-index (- cycle (get first-reward-cycle moved-state))) + (moved-reward-list (get reward-set-indexes moved-state)) + ;; reward-set-indexes[moved-cycle-index] = set-index via slice?, append, concat. + (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) + (map-set stacking-state { stacker: moved-stacker } + (merge moved-state { reward-set-indexes: update-list }))) + ;; otherwise, we don't need to update stacking-state after move + true)) + ;; if not moving, just noop + true) + ;; in all cases, we now need to delete the last list entry + (map-delete reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix }) + (map-set reward-cycle-pox-address-list-len { reward-cycle: cycle } { len: cycle-last-entry-ix }) + ;; finally, update `reward-cycle-total-stacked` + (map-set reward-cycle-total-stacked { reward-cycle: cycle } + { total-ustx: (- (get total-ustx (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: cycle }))) + cycle-entry-total-ustx) }) + (ok (merge data { cycle: (+ u1 cycle)} ))))) + +;; This method is called by the Stacks block processor directly in order to handle the contract state mutations +;; associated with an early unlock. This can only be invoked by the block processor: it is private, and no methods +;; from this contract invoke it. +(define-private (handle-unlock (user principal) (amount-locked uint) (cycle-to-unlock uint)) + (let ((user-stacking-state (unwrap-panic (map-get? stacking-state { stacker: user }))) + (first-cycle-locked (get first-reward-cycle user-stacking-state)) + (reward-set-indexes (get reward-set-indexes user-stacking-state))) + ;; iterate over each reward set the user is a member of, and remove them from the sets. only apply to reward sets after cycle-to-unlock. + (try! (fold fold-unlock-reward-cycle reward-set-indexes (ok { cycle: first-cycle-locked, first-unlocked-cycle: cycle-to-unlock, stacker: user }))) + ;; Now that we've cleaned up all the reward set entries for the user, delete the user's stacking-state + (map-delete stacking-state { stacker: user }) + (ok true))) + +;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). +;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. +;; Used by add-pox-addr-to-reward-cycles. +;; No checking is done. +;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if +;; the pox-addr was added to the given cycle. Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. 
;; Fold step: add `pox-addr` to the `i`-th reward cycle after `first-reward-cycle`,
;; but only while `i < num-cycles`; iterations past `num-cycles` are no-ops that
;; pass `params` through unchanged (the fold always runs all 12 steps).
(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple
                                                      (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
                                                      (reward-set-indexes (list 12 uint))
                                                      (first-reward-cycle uint)
                                                      (num-cycles uint)
                                                      (stacker (optional principal))
                                                      (amount-ustx uint)
                                                      (i uint))))
    (let ((reward-cycle (+ (get first-reward-cycle params) (get i params)))
          (num-cycles (get num-cycles params))
          (i (get i params))
          ;; (some index) if this step inserted into the reward set; none if past num-cycles
          (reward-set-index (if (< i num-cycles)
            (let ((total-ustx (get-total-ustx-stacked reward-cycle))
                  (reward-index
                      ;; record how many uSTX this pox-addr will stack for in the given reward cycle
                      (append-reward-cycle-pox-addr
                        (get pox-addr params)
                        reward-cycle
                        (get amount-ustx params)
                        (get stacker params)
                        )))
              ;; update running total
              (map-set reward-cycle-total-stacked
                 { reward-cycle: reward-cycle }
                 { total-ustx: (+ (get amount-ustx params) total-ustx) })
              (some reward-index))
            none))
          ;; `i` only advances while cycles are actually being written, so the
          ;; final `i` equals the number of insertions (callers assert on this)
          (next-i (if (< i num-cycles) (+ i u1) i)))
    {
        pox-addr: (get pox-addr params),
        first-reward-cycle: (get first-reward-cycle params),
        num-cycles: num-cycles,
        amount-ustx: (get amount-ustx params),
        stacker: (get stacker params),
        ;; append this step's reward-set index, if any (list is capped at 12)
        reward-set-indexes: (match
            reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12))
            (get reward-set-indexes params)),
        i: next-i
    }))

;; Add a PoX address to a given sequence of reward cycle lists.
;; A PoX address can be added to at most 12 consecutive cycles.
;; No checking is done.
;; Register `pox-addr` (stacking `amount-ustx` on behalf of `stacker`) in each of the
;; `num-cycles` reward cycles starting at `first-reward-cycle`.
;; Returns (ok reward-set-indexes): the index of the new entry in each cycle's
;; reward-cycle-pox-address-list, one per cycle.
(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
                                               (first-reward-cycle uint)
                                               (num-cycles uint)
                                               (amount-ustx uint)
                                               (stacker principal))
  (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))
        (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes
                       { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles,
                         reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker) }))
        (reward-set-indexes (get reward-set-indexes results)))
    ;; For safety, check how many times (add-pox-addr-to-ith-reward-cycle) actually
    ;; inserted an entry (its `i` counter and the length of reward-set-indexes).
    ;; Both _should_ be equal to num-cycles.
    (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE))
    (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE))
    (ok reward-set-indexes)))

;; Fold step for add-pox-partial-stacked: credit `amount-ustx` to tx-sender's
;; partial-stacked-by-cycle entry for the current `reward-cycle`, but only while
;; `cycle-index < num-cycles`; later iterations only advance the cycle counter.
(define-private (add-pox-partial-stacked-to-ith-cycle
                 (cycle-index uint)
                 (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) },
                           reward-cycle: uint,
                           num-cycles: uint,
                           amount-ustx: uint }))
  (let ((pox-addr (get pox-addr params))
        (num-cycles (get num-cycles params))
        (reward-cycle (get reward-cycle params))
        (amount-ustx (get amount-ustx params)))
    (let ((current-amount
           ;; previously accumulated partial amount for this (sender, addr, cycle); u0 if none
           (default-to u0
             (get stacked-amount
                  (map-get? partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle })))))
      (if (>= cycle-index num-cycles)
          ;; do not add to cycles >= cycle-index
          false
          ;; otherwise, add to the partial-stacked-by-cycle
          (map-set partial-stacked-by-cycle
                   { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }
                   { stacked-amount: (+ amount-ustx current-amount) }))
      ;; produce the next params tuple
      { pox-addr: pox-addr,
        reward-cycle: (+ u1 reward-cycle),
        num-cycles: num-cycles,
        amount-ustx: amount-ustx })))

;; Add a PoX address to a given sequence of partial reward cycle lists.
;; A PoX address can be added to at most 12 consecutive cycles.
;; No checking is done.  Always returns true (the per-cycle bookkeeping is a
;; side effect of the fold over add-pox-partial-stacked-to-ith-cycle).
(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
                                         (first-reward-cycle uint)
                                         (num-cycles uint)
                                         (amount-ustx uint))
  (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)))
    (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes
      { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx })
    true))

;; What is the minimum amount of uSTX to be stacked in the given reward cycle?
;; Computed as a fixed fraction (1/STACKING_THRESHOLD_25) of the liquid supply,
;; so it can rise between cycles as the supply grows.
;; Used internally by the Stacks node, and visible publicly.
(define-read-only (get-stacking-minimum)
    (/ stx-liquid-supply STACKING_THRESHOLD_25))

;; Is the address version byte valid for a PoX address?
;; (i.e. numerically <= MAX_ADDRESS_VERSION)
(define-read-only (check-pox-addr-version (version (buff 1)))
    (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION))

;; Is this buffer the right length for the given PoX address version?
;; Versions 0x00-0x04 require 20-byte hashbytes; 0x05-0x06 require 32 bytes;
;; anything larger is invalid.
(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32)))
    (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20)
        (is-eq (len hashbytes) u20)
        (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32)
            (is-eq (len hashbytes) u32)
            false)))

;; Is the given lock period valid?
;; (inclusive range [MIN_POX_REWARD_CYCLES, MAX_POX_REWARD_CYCLES])
(define-read-only (check-pox-lock-period (lock-period uint))
    (and (>= lock-period MIN_POX_REWARD_CYCLES)
         (<= lock-period MAX_POX_REWARD_CYCLES)))

;; Evaluate if a participant can stack an amount of STX for a given period.
;; Enforces the stacking minimum, then delegates the remaining checks to
;; minimal-can-stack-stx.  Returns (ok true) or a specific (err ...) code.
;; This method is designed as a read-only method so that it can be used as
;; a set of guard conditions and also as a read-only RPC call that can be
;; performed beforehand.
(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32))))
                                 (amount-ustx uint)
                                 (first-reward-cycle uint)
                                 (num-cycles uint))
  (begin
    ;; minimum uSTX must be met
    (asserts!
(<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender first-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! (check-pox-addr-version (get version pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + ;; address hashbytes must be valid for the version + (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + (ok true))) + +;; Revoke contract-caller authorization to call stacking methods +(define-public (disallow-contract-caller (caller principal)) + (begin + (asserts! 
(is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller })))) + +;; Give a contract-caller authorization to call stacking methods +;; normally, stacking methods may only be invoked by _direct_ transactions +;; (i.e., the tx-sender issues a direct contract-call to the stacking methods) +;; by issuing an allowance, the tx-sender may call through the allowed contract +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (begin + (asserts! (is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-set allowance-contract-callers + { sender: tx-sender, contract-caller: caller } + { until-burn-ht: until-burn-ht })))) + +;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX). +;; The STX will be locked for the given number of reward cycles (lock-period). +;; This is the self-service interface. tx-sender will be the Stacker. +;; +;; * The given stacker cannot currently be stacking. +;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum) +;; at the time this method is called. +;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold +;; may increase between reward cycles. +;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`. +;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle, +;; and in most cases should be set to the current burn block height. +;; +;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically. 
+(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return the lock-up information, so the node can actually carry out the lock. 
+ (ok { stacker: tx-sender, lock-amount: amount-ustx, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +(define-public (revoke-delegate-stx) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete delegation-state { stacker: tx-sender })))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), + hashbytes: (buff 32) }))) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! 
(is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. +;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. 
+ (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + amount-ustx: amount-ustx, + i: u0 })) + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. +;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. 
+;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! (> reward-cycle (current-pox-reward-cycle)) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((amount-ustx (get stacked-amount partial-stacked)) + ;; reward-cycle must point to an existing record in reward-cycle-total-stacked + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed + (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list + (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) + (err ERR_DELEGATION_NO_REWARD_SLOT))) + (increased-ustx (+ (get total-ustx existing-entry) amount-ustx)) + (total-ustx (+ (get total-ustx existing-total) amount-ustx))) + + ;; must be stackable + (try! (minimal-can-stack-stx pox-addr total-ustx reward-cycle u1)) + + ;; new total must exceed the stacking minimum + (asserts! 
(<= (get-stacking-minimum) total-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + ;; there must *not* be a stacker entry (since this is a delegator) + (asserts! (is-none (get stacker existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; the given PoX address must match the one on record + (asserts! (is-eq pox-addr (get pox-addr existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; update the pox-address list -- bump the total-ustx + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: pox-addr, + total-ustx: increased-ustx, + stacker: none }) + + ;; update the total ustx in this cycle + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok true)))) + +;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle +;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))) + (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to 
"post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + unlock-burn-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; stacker principal must not be stacking + (asserts! (is-none (get-stacker-info stacker)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance stacker) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx) + + ;; add stacker record + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + first-reward-cycle: first-reward-cycle, + reward-set-indexes: (list), + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + lock-amount: amount-ustx, + unlock-burn-height: unlock-burn-height }))) + +;; Reject Stacking for this reward cycle. +;; tx-sender votes all its uSTX for rejection. +;; Note that unlike PoX, rejecting PoX does not lock the tx-sender's +;; tokens. PoX rejection acts like a coin vote. +(define-public (reject-pox) + (let ( + (balance (stx-get-balance tx-sender)) + (vote-reward-cycle (+ u1 (current-pox-reward-cycle))) + ) + + ;; tx-sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender vote-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; tx-sender can't be a stacker + (asserts! 
(is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; vote for rejection + (map-set stacking-rejection + { reward-cycle: vote-reward-cycle } + { amount: (+ (next-cycle-rejection-votes) balance) } + ) + + ;; mark voted + (map-set stacking-rejectors + { stacker: tx-sender, reward-cycle: vote-reward-cycle } + { amount: balance } + ) + + (ok true)) +) + +;; Used for PoX parameters discovery +(define-read-only (get-pox-info) + (ok { + min-amount-ustx: (get-stacking-minimum), + reward-cycle-id: (current-pox-reward-cycle), + prepare-cycle-length: (var-get pox-prepare-cycle-length), + first-burnchain-block-height: (var-get first-burnchain-block-height), + reward-cycle-length: (var-get pox-reward-cycle-length), + rejection-fraction: (var-get pox-rejection-fraction), + current-rejection-votes: (next-cycle-rejection-votes), + total-liquid-supply-ustx: stx-liquid-supply, + }) +) + +;; Update the number of stacked STX in a given reward cycle entry. +;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number. +;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have +;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle. +(define-private (increase-reward-cycle-entry + (reward-cycle-index uint) + (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint }))) + (let ((data (try! updates)) + (first-cycle (get first-cycle data)) + (reward-cycle (get reward-cycle data))) + (if (> first-cycle reward-cycle) + ;; not at first cycle to process yet + (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data) }) + (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) + (existing-total (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (add-amount (get add-amount data)) + (total-ustx (+ (get total-ustx existing-total) add-amount))) + ;; stacker must match + (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none) + ;; update the pox-address list + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: (get pox-addr existing-entry), + ;; This addresses the bug in pox-2 (see SIP-022) + total-ustx: (+ (get total-ustx existing-entry) add-amount), + stacker: (some (get stacker data)) }) + ;; update the total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + (some { first-cycle: first-cycle, + reward-cycle: (+ u1 reward-cycle), + stacker: (get stacker data), + add-amount: (get add-amount data) }))))) + +;; Increase the number of STX locked. +;; *New in Stacks 2.1* +;; This method locks up an additional amount of STX from `tx-sender`'s, indicated +;; by `increase-by`. The `tx-sender` must already be Stacking. +(define-public (stack-increase (increase-by uint)) + (let ((stacker-info (stx-account tx-sender)) + (amount-stacked (get locked stacker-info)) + (amount-unlocked (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + (first-increased-cycle (+ cur-cycle u1)) + (stacker-state (unwrap! (map-get? stacking-state + { stacker: tx-sender }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + ;; tx-sender must be currently locked + (asserts! (> amount-stacked u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + ;; stacker must have enough stx to lock + (asserts! (>= amount-unlocked increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + ;; update reward cycle amounts + (asserts! (is-some (fold increase-reward-cycle-entry + (get reward-set-indexes stacker-state) + (some { first-cycle: first-increased-cycle, + reward-cycle: (get first-reward-cycle stacker-state), + stacker: tx-sender, + add-amount: increase-by }))) + (err ERR_STACKING_UNREACHABLE)) + ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-3 + (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)}))) + +;; Extend an active Stacking lock. +;; *New in Stacks 2.1* +;; This method extends the `tx-sender`'s current lockup for an additional `extend-count` +;; and associates `pox-addr` with the rewards +(define-public (stack-extend (extend-count uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) })) + (let ((stacker-info (stx-account tx-sender)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts!
(>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; TODO: add more assertions to sanity check the `stacker-info` values with + ;; the `stacker-state` values + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. 
this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. +;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + (cycle-count (try! (if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! 
(> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. +;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) + +;; How many uSTX have voted to reject PoX in a given reward cycle? 
+;; *New in Stacks 2.1* +(define-read-only (get-total-pox-rejection (reward-cycle uint)) + (match (map-get? stacking-rejection { reward-cycle: reward-cycle }) + rejected + (get amount rejected) + u0 + ) +) diff --git a/src/chainstate/stacks/boot/pox_2_tests.rs b/src/chainstate/stacks/boot/pox_2_tests.rs index d1adc2a52d..242cb18de8 100644 --- a/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/src/chainstate/stacks/boot/pox_2_tests.rs @@ -2,6 +2,25 @@ use std::collections::{HashMap, HashSet, VecDeque}; use std::convert::TryFrom; use std::convert::TryInto; +use crate::address::AddressHashMode; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; +use crate::chainstate::stacks::boot::{ + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, + POX_3_NAME, +}; +use crate::chainstate::stacks::db::{ + MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, +}; +use crate::chainstate::stacks::index::marf::MarfConnection; +use crate::chainstate::stacks::index::MarfTrieId; +use crate::chainstate::stacks::*; +use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::HeadersDBConn; +use crate::core::*; +use crate::util_lib::db::{DBConn, FromRow}; +use crate::vm::events::StacksTransactionEvent; use clarity::types::Address; use clarity::vm::clarity::ClarityConnection; use clarity::vm::contexts::OwnedEnvironment; @@ -29,28 +48,12 @@ use stacks_common::util::hash::to_hex; use stacks_common::util::hash::{Sha256Sum, Sha512Trunc256Sum}; use super::{test::*, RawRewardSetEntry}; -use crate::address::AddressHashMode; use crate::chainstate::burn::operations::*; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::burn::ConsensusHash; -use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; -use crate::chainstate::stacks::boot::{ 
- BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, -}; -use crate::chainstate::stacks::db::{ - MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, -}; -use crate::chainstate::stacks::index::marf::MarfConnection; -use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::*; use crate::clarity_vm::clarity::Error as ClarityError; -use crate::clarity_vm::database::marf::MarfedKV; -use crate::clarity_vm::database::HeadersDBConn; use crate::core::*; use crate::net::test::TestPeer; use crate::util_lib::boot::boot_code_id; -use crate::util_lib::db::{DBConn, FromRow}; -use crate::vm::events::StacksTransactionEvent; use crate::{ burnchains::Burnchain, chainstate::{ @@ -95,35 +98,50 @@ pub fn get_reward_set_entries_index_order_at( }) } -/// Get the STXBalance for `account` at the given chaintip +/// Get the canonicalized STXBalance for `account` at the given chaintip pub fn get_stx_account_at( peer: &mut TestPeer, tip: &StacksBlockId, account: &PrincipalData, ) -> STXBalance { - with_clarity_db_ro(peer, tip, |db| db.get_account_stx_balance(account)) + with_clarity_db_ro(peer, tip, |db| { + db.get_stx_balance_snapshot(account) + .canonical_balance_repr() + }) } -/// Get the STXBalance for `account` at the given chaintip -pub fn get_stacking_state_pox_2( +/// get the stacking-state entry for an account at the chaintip +pub fn get_stacking_state_pox( peer: &mut TestPeer, tip: &StacksBlockId, account: &PrincipalData, + pox_contract: &str, ) -> Option { with_clarity_db_ro(peer, tip, |db| { let lookup_tuple = Value::Tuple( TupleData::from_data(vec![("stacker".into(), account.clone().into())]).unwrap(), ); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(pox_contract, false), "stacking-state", &lookup_tuple, + &epoch, ) .unwrap() .expect_optional() }) } +/// Get the pox-2 stacking-state entry for `account` at the 
given chaintip +pub fn get_stacking_state_pox_2( + peer: &mut TestPeer, + tip: &StacksBlockId, + account: &PrincipalData, +) -> Option { + get_stacking_state_pox(peer, tip, account, boot::POX_2_NAME) +} + /// Perform `check_stacker_link_invariants` on cycles [first_cycle_number, max_cycle_number] pub fn check_all_stacker_link_invariants( peer: &mut TestPeer, @@ -142,11 +160,27 @@ pub fn check_all_stacker_link_invariants( info!("Invoked check all"; "tip" => %tip, "first" => first_cycle_number, "last" => max_cycle_number); for cycle in first_cycle_number..(max_cycle_number + 1) { + // check if it makes sense to test invariants yet. + // For cycles where PoX-3 is active, check if Epoch24 has activated first. + let active_pox_contract = peer + .config + .burnchain + .pox_constants + .active_pox_contract(peer.config.burnchain.reward_cycle_to_block_height(cycle)); + if active_pox_contract == POX_3_NAME && epoch < StacksEpochId::Epoch24 { + info!( + "Skipping check on a PoX-3 reward cycle because Epoch24 has not started yet"; + "cycle" => cycle, + "epoch" => %epoch, + "active_pox_contract" => %active_pox_contract, + ); + continue; + } + check_stacker_link_invariants(peer, tip, cycle); } } -#[cfg(test)] pub fn generate_pox_clarity_value(str_hash: &str) -> Value { let byte_vec = hex_bytes(str_hash).unwrap(); let pox_addr_tuple = TupleData::from_data(vec![ @@ -158,19 +192,17 @@ pub fn generate_pox_clarity_value(str_hash: &str) -> Value { Value::Tuple(pox_addr_tuple) } -#[cfg(test)] -struct PoxPrintFields { - op_name: String, - stacker: Value, - balance: Value, - locked: Value, - burnchain_unlock_height: Value, +pub struct PoxPrintFields { + pub op_name: String, + pub stacker: Value, + pub balance: Value, + pub locked: Value, + pub burnchain_unlock_height: Value, } -#[cfg(test)] // This function takes in a StacksTransactionEvent for a print statement from a pox function that modifies // a stacker's state. It verifies that the values in the print statement are as expected. 
-fn check_pox_print_event( +pub fn check_pox_print_event( event: &StacksTransactionEvent, common_data: PoxPrintFields, op_data: HashMap<&str, Value>, @@ -258,11 +290,11 @@ fn check_pox_print_event( } pub struct StackingStateCheckData { - pox_addr: PoxAddress, + pub pox_addr: PoxAddress, /// this is a map from reward cycle number to the value in reward-set-indexes - cycle_indexes: HashMap, - first_cycle: u128, - lock_period: u128, + pub cycle_indexes: HashMap, + pub first_cycle: u128, + pub lock_period: u128, } /// Check the stacking-state invariants of `stacker` @@ -272,14 +304,26 @@ pub fn check_stacking_state_invariants( tip: &StacksBlockId, stacker: &PrincipalData, expect_indexes: bool, + active_pox_contract: &str, ) -> StackingStateCheckData { let account_state = with_clarity_db_ro(peer, tip, |db| { db.get_stx_balance_snapshot(stacker) .canonical_balance_repr() }); - let stacking_state_entry = get_stacking_state_pox_2(peer, tip, stacker) - .expect("Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state") + let tip_burn_height = StacksChainState::get_stacks_block_header_info_by_index_block_hash( + peer.chainstate().db(), + tip, + ) + .unwrap() + .unwrap() + .burn_header_height; + + let stacking_state_entry = get_stacking_state_pox(peer, tip, stacker, active_pox_contract) + .expect(&format!( + "Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state (pox_contract = {})", + active_pox_contract, + )) .expect_tuple(); let first_cycle = stacking_state_entry .get("first-reward-cycle") @@ -310,7 +354,9 @@ pub fn check_stacking_state_invariants( assert_eq!( account_state.unlock_height() + 1, stacking_state_unlock_ht, - "Invariant violated: stacking-state and account state have different unlock heights" + "Invariant violated: stacking-state and account state have different unlock heights. 
Tip height = {}, PoX Contract: {}", + tip_burn_height, + active_pox_contract, ); let mut cycle_indexes = HashMap::new(); @@ -334,10 +380,12 @@ pub fn check_stacking_state_invariants( .unwrap(), ); let entry_value = with_clarity_db_ro(peer, tip, |db| { + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(active_pox_contract, false), "reward-cycle-pox-address-list", - &entry_key + &entry_key, + &epoch, ) .unwrap() .expect_optional() @@ -400,11 +448,47 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .config .burnchain .reward_cycle_to_block_height(cycle_number); + + let tip_epoch = SortitionDB::get_stacks_epoch(peer.sortdb().conn(), current_burn_height as u64) + .unwrap() + .unwrap(); + + let cycle_start_epoch = SortitionDB::get_stacks_epoch(peer.sortdb().conn(), cycle_start) + .unwrap() + .unwrap(); + + let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( + peer.config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); + + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch22 + || cycle_start_epoch.epoch_id == StacksEpochId::Epoch23 + { + info!( + "Skipping reward set validation checks on reward cycles that start in Epoch 2.2 or Epoch 2.3"; + "cycle" => cycle_number, + ); + return; + } + + if cycle_start_epoch.epoch_id == StacksEpochId::Epoch24 && active_pox_contract != POX_3_NAME { + info!( + "Skipping validation of reward set that started in Epoch24, but its cycle starts before pox-3 activation"; + "cycle" => cycle_number, + "cycle_start" => cycle_start, + "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, + "epoch_2_4_start" => cycle_start_epoch.start_height, + ); + return; + } + let reward_set_entries = get_reward_set_entries_index_order_at(peer, tip, cycle_start); let mut checked_total = 0; for (actual_index, entry) in reward_set_entries.iter().enumerate() { debug!( 
- "Cycle {}: Check {:?} (stacked={}, stacker={})", + "Cycle {}: Check {:?} (stacked={}, stacker={}, tip_epoch={})", cycle_number, &entry.reward_address, entry.amount_stacked, @@ -412,11 +496,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c .stacker .as_ref() .map(|s| format!("{}", &s)) - .unwrap_or("(none)".to_string()) + .unwrap_or("(none)".to_string()), + &tip_epoch.epoch_id, ); checked_total += entry.amount_stacked; if let Some(stacker) = &entry.stacker { - if tip_cycle > cycle_start { + if tip_cycle > cycle_number { // if the checked cycle is before the tip's cycle, // the reward-set-entrie's stacker links are no longer necessarily valid // (because the reward cycles for those entries has passed) @@ -424,14 +509,36 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c continue; } + if tip_epoch.epoch_id == StacksEpochId::Epoch22 + || tip_epoch.epoch_id == StacksEpochId::Epoch23 + { + // if the current tip is epoch-2.2 or epoch-2.3, the stacker invariant checks + // no longer make sense: the stacker has unlocked, even though a reward cycle + // is still active (i.e., the last active cycle from epoch-2.1). + continue; + } + + if tip_epoch.epoch_id >= StacksEpochId::Epoch24 + && current_burn_height + <= peer.config.burnchain.pox_constants.pox_3_activation_height + { + // if the tip is epoch-2.4, and pox-3 isn't the active pox contract yet, + // the invariant checks will not make sense for the same reasons as above + continue; + } + let StackingStateCheckData { pox_addr, cycle_indexes, .. 
- } = check_stacking_state_invariants(peer, tip, stacker, true); + } = check_stacking_state_invariants(peer, tip, stacker, true, active_pox_contract); assert_eq!(&entry.reward_address, &pox_addr, "Invariant violated: reward-cycle entry has a different PoX addr than in stacker-state"); - assert_eq!(cycle_indexes.get(&(cycle_number as u128)).cloned().unwrap(), actual_index as u128, "Invariant violated: stacking-state.reward-set-indexes entry at cycle_number must point to this stacker's entry"); + assert_eq!( + cycle_indexes.get(&(cycle_number as u128)).cloned().unwrap(), + actual_index as u128, + "Invariant violated: stacking-state.reward-set-indexes entry at cycle_number must point to this stacker's entry" + ); } } let expected_total = get_reward_cycle_total(peer, tip, cycle_number); @@ -444,6 +551,12 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c /// Get the `cycle_number`'s total stacked amount at the given chaintip pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_number: u64) -> u128 { + let active_pox_contract = peer.config.burnchain.pox_constants.active_pox_contract( + peer.config + .burnchain + .reward_cycle_to_block_height(cycle_number), + ); + with_clarity_db_ro(peer, tip, |db| { let total_stacked_key = TupleData::from_data(vec![( "reward-cycle".into(), @@ -451,10 +564,12 @@ pub fn get_reward_cycle_total(peer: &mut TestPeer, tip: &StacksBlockId, cycle_nu )]) .unwrap() .into(); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(active_pox_contract, false), "reward-cycle-total-stacked", &total_stacked_key, + &epoch, ) .map(|v| { v.expect_optional() @@ -479,6 +594,7 @@ pub fn get_partial_stacked( pox_addr: &Value, cycle_number: u64, sender: &PrincipalData, + pox_contract: &str, ) -> u128 { with_clarity_db_ro(peer, tip, |db| { let key = TupleData::from_data(vec![ @@ -488,10 +604,12 @@ pub fn 
get_partial_stacked( ]) .unwrap() .into(); + let epoch = db.get_clarity_epoch_version(); db.fetch_entry_unknown_descriptor( - &boot_code_id(boot::POX_2_NAME, false), + &boot_code_id(pox_contract, false), "partial-stacked-by-cycle", &key, + &epoch, ) .map(|v| { v.expect_optional() @@ -1115,7 +1233,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); while get_tip(peer.sortdb.as_ref()).block_height < height_target { @@ -1140,7 +1262,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), 0); // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block @@ -1149,7 +1275,11 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { &latest_block, &key_to_stacks_addr(&bob).to_account_principal(), ) - .canonical_repr_at_block(height_target + 1, burnchain.pox_constants.v1_unlock_height); + .canonical_repr_at_block( + height_target + 1, + burnchain.pox_constants.v1_unlock_height, + burnchain.pox_constants.v2_unlock_height, + ); assert_eq!(bob_bal.amount_locked(), 0); // check that the total reward cycle amounts have decremented correctly @@ -1383,6 +1513,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -1407,6 +1538,7 @@ fn 
delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -1488,6 +1620,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_first_lock_amount); } @@ -1499,6 +1632,7 @@ fn delegate_stack_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_delegation_amount,); } @@ -2879,12 +3013,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, @@ -2932,12 +3078,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. 
- } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, @@ -2988,12 +3146,24 @@ fn test_delegate_extend_transition_pox_2() { first_cycle: alice_first_cycle, lock_period: alice_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &alice_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &alice_principal, + false, + POX_2_NAME, + ); let StackingStateCheckData { first_cycle: bob_first_cycle, lock_period: bob_lock_period, .. - } = check_stacking_state_invariants(&mut peer, &tip_index_block, &bob_principal, false); + } = check_stacking_state_invariants( + &mut peer, + &tip_index_block, + &bob_principal, + false, + POX_2_NAME, + ); assert_eq!( alice_first_cycle as u64, first_v2_cycle, @@ -4376,6 +4546,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -4402,6 +4573,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); } @@ -4475,6 +4647,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cur_reward_cycle + 1, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, 1); @@ -4486,6 +4659,7 @@ fn stack_aggregation_increase() { &bob_pox_addr, cycle_number, &bob_principal, + POX_2_NAME, ); assert_eq!(partial_stacked, alice_delegation_amount); } diff --git a/src/chainstate/stacks/boot/pox_3_tests.rs b/src/chainstate/stacks/boot/pox_3_tests.rs new file mode 100644 index 0000000000..adefc7ee01 --- /dev/null +++ b/src/chainstate/stacks/boot/pox_3_tests.rs @@ -0,0 +1,4412 @@ +use std::collections::{HashMap, HashSet, VecDeque}; +use std::convert::TryFrom; +use std::convert::TryInto; 
+ +use crate::address::AddressHashMode; +use crate::burnchains::PoxConstants; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; +use crate::chainstate::stacks::boot::pox_2_tests::{ + check_pox_print_event, check_stacking_state_invariants, generate_pox_clarity_value, + get_partial_stacked, get_reward_cycle_total, get_reward_set_entries_at, get_stacking_state_pox, + get_stacking_state_pox_2, get_stx_account_at, PoxPrintFields, StackingStateCheckData, +}; +use crate::chainstate::stacks::boot::{ + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, + POX_3_NAME, +}; +use crate::chainstate::stacks::db::{ + MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, +}; +use crate::chainstate::stacks::index::marf::MarfConnection; +use crate::chainstate::stacks::index::MarfTrieId; +use crate::chainstate::stacks::*; +use crate::clarity_vm::database::marf::MarfedKV; +use crate::clarity_vm::database::HeadersDBConn; +use crate::core::*; +use crate::util_lib::db::{DBConn, FromRow}; +use crate::vm::events::StacksTransactionEvent; +use clarity::types::Address; +use clarity::vm::contexts::OwnedEnvironment; +use clarity::vm::contracts::Contract; +use clarity::vm::costs::CostOverflowingMath; +use clarity::vm::database::*; +use clarity::vm::errors::{ + CheckErrors, Error, IncomparableError, InterpreterError, InterpreterResult, RuntimeErrorType, +}; +use clarity::vm::eval; +use clarity::vm::representations::SymbolicExpression; +use clarity::vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; +use clarity::vm::types::Value::Response; +use clarity::vm::types::{ + BuffData, OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, SequenceData, + StacksAddressExtensions, StandardPrincipalData, TupleData, TupleTypeSignature, TypeSignature, + Value, NONE, +}; +use 
stacks_common::util::hash::hex_bytes; +use stacks_common::util::hash::to_hex; +use stacks_common::util::hash::{Sha256Sum, Sha512Trunc256Sum}; + +use crate::net::test::TestPeer; +use crate::util_lib::boot::boot_code_id; +use crate::{ + burnchains::Burnchain, + chainstate::{ + burn::db::sortdb::SortitionDB, + stacks::{events::TransactionOrigin, tests::make_coinbase}, + }, + clarity_vm::{clarity::ClarityBlockConnection, database::marf::WritableMarfStore}, + net::test::TestEventObserver, +}; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, +}; + +use super::{test::*, RawRewardSetEntry}; +use crate::clarity_vm::clarity::Error as ClarityError; + +use crate::chainstate::burn::operations::*; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; + +const USTX_PER_HOLDER: u128 = 1_000_000; + +/// Return the BlockSnapshot for the latest sortition in the provided +/// SortitionDB option-reference. Panics on any errors. +fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { + SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() +} + +fn make_test_epochs_pox() -> (Vec, PoxConstants) { + let EMPTY_SORTITIONS = 25; + let EPOCH_2_1_HEIGHT = 11; // 36 + let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 + let EPOCH_2_3_HEIGHT = EPOCH_2_2_HEIGHT + 2; // 52 + // epoch-2.4 will start at the first block of cycle 11! 
+ // this means that cycle 11 should also be treated like a "burn" + let EPOCH_2_4_HEIGHT = EPOCH_2_2_HEIGHT + 6; // 56 + + // cycle 11 = 60 + + let epochs = vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT, + end_height: EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT, + end_height: EMPTY_SORTITIONS + EPOCH_2_3_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: EMPTY_SORTITIONS + EPOCH_2_3_HEIGHT, + end_height: EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + ]; + + let mut pox_constants = PoxConstants::mainnet_default(); + pox_constants.reward_cycle_length = 5; + pox_constants.prepare_length = 2; + pox_constants.anchor_threshold = 1; + pox_constants.v1_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT + 1) as u32; + pox_constants.v2_unlock_height = 
(EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT + 1) as u32; + pox_constants.pox_3_activation_height = (EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT + 1) as u32; + + (epochs, pox_constants) +} + +/// In this test case, two Stackers, Alice and Bob stack and interact with the +/// PoX v1 contract and PoX v2 contract across the epoch transition and then +/// again with the PoX v3 contract. +/// +/// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after +/// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". +/// After the early unlock, Alice re-stacks in PoX v2 +/// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, +/// but is forbidden because he has already placed an account lock via PoX v2. +/// +/// After the PoX-3 contract is instantiated, Alice and Bob both stack via PoX v3. +/// +#[test] +fn simple_pox_lockup_transition_pox_2() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + eprintln!("First v2 cycle = {}", first_v2_cycle); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7104, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 20)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + + let EXPECTED_ALICE_FIRST_REWARD_CYCLE = 6; + + let mut coinbase_nonce = 0; + + // our "tenure counter" is now at 0 + let tip = get_tip(peer.sortdb.as_ref()); + assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); + + // first tenure is empty + peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + let alice_account = get_account(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!( + alice_account.stx_balance.amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + assert_eq!(alice_account.stx_balance.amount_locked(), 0); + assert_eq!(alice_account.stx_balance.unlock_height(), 0); + + // next tenure include Alice's lockup + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_lockup( + &alice, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + 4, + tip.block_height, + ); + + // our "tenure counter" is now at 1 
+ assert_eq!(tip.block_height, 1 + EMPTY_SORTITIONS as u64); + + let tip_index_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check the stacking minimum + let total_liquid_ustx = get_liquid_ustx(&mut peer); + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!( + min_ustx, + total_liquid_ustx / POX_TESTNET_STACKING_THRESHOLD_25 + ); + + // no reward addresses + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!(reward_addrs.len(), 0); + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let alice_first_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + + assert_eq!(alice_first_reward_cycle, EXPECTED_ALICE_FIRST_REWARD_CYCLE); + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + // produce blocks until immediately before the 2.1 epoch switch + while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + // Have Charlie try to use the PoX2 contract. This transaction + // should be accepted (checked via the tx receipt). Also, importantly, + // the cost tracker should assign costs to Charlie's transaction. + // This is also checked by the transaction receipt. 
+ let tip = get_tip(peer.sortdb.as_ref()); + + let test = make_pox_2_contract_call( + &charlie, + 0, + "delegate-stx", + vec![ + Value::UInt(1_000_000), + PrincipalData::from(key_to_stacks_addr(&charlie)).into(), + Value::none(), + Value::none(), + ], + ); + peer.tenure_with_txs(&[test], &mut coinbase_nonce); + + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + // in the next tenure, PoX 2 should now exist. + // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10, and 11 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); + + assert_eq!( + get_tip(peer.sortdb.as_ref()).block_height as u32, + pox_constants.v1_unlock_height + 1, + "Test should have reached 1 + PoX-v1 unlock height" + ); + + // Auto unlock height is reached, Alice balance should be unlocked + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // Now, Bob tries to lock in PoX v1 too, but it shouldn't work! + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_lockup( + &bob, + 1, + 512 * POX_THRESHOLD_STEPS_USTX, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + 4, + tip.block_height, + ); + + let block_id = peer.tenure_with_txs(&[bob_lockup], &mut coinbase_nonce); + + // At this point, the auto unlock height for v1 accounts has been reached. 
+ // let Alice stack in PoX v2 + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_2_lockup( + &alice, + 1, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 12, + tip.block_height, + ); + peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // Alice locked half her balance in PoX 2 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // now, let's roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // this block is mined in epoch-2.2 + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + // this block should unlock alice's balance + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // now, roll the chain forward to Epoch-2.4 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always be unlocked + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + } + + let tip = get_tip(peer.sortdb.as_ref()).block_height; + let bob_lockup = make_pox_3_lockup( + &bob, + 2, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( 
+ AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip, + ); + + let alice_lockup = make_pox_3_lockup( + &alice, + 2, + 512 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip, + ); + + peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + debug!("Alice addr: {}, Bob addr: {}", alice_address, bob_address); + + let mut tested_charlie = false; + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + debug!("Transaction addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == key_to_stacks_addr(&charlie) { + assert!( + r.execution_cost != ExecutionCost::zero(), + "Execution cost is not zero!" 
+ ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); + + tested_charlie = true; + } + } + } + } + + assert!(tested_charlie, "Charlie TX must be tested"); + // Alice should have three accepted transactions: + // TX0 -> Alice's initial lockup in PoX 1 + // TX1 -> Alice's PoX 2 lockup + // TX2 -> Alice's PoX 3 lockup + assert_eq!(alice_txs.len(), 3, "Alice should have 3 confirmed txs"); + // Bob should have two accepted transactions: + // TX0 -> Bob's initial lockup in PoX 2 + // TX1 -> Bob's attempt to lock again in PoX 1 -- this one should fail + // because PoX 1 is now defunct. Checked via the tx receipt. + // TX2 -> Bob's PoX 3 lockup + assert_eq!(bob_txs.len(), 3, "Bob should have 3 confirmed txs"); + // Charlie should have one accepted transactions: + // TX0 -> Charlie's delegation in PoX 2. This tx just checks that the + // initialization code tracks costs in txs that occur after the + // initialization code (which uses a free tracker). + assert_eq!(charlie_txs.len(), 1, "Charlie should have 1 confirmed txs"); + + // TX0 -> Alice's initial lockup in PoX 1 + assert!( + match alice_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx0 should have committed okay" + ); + + // TX1 -> Alice's PoX 2 lockup + assert!( + match alice_txs.get(&1).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx1 should have committed okay" + ); + + // TX2 -> Alice's PoX 3 lockup + assert!( + match alice_txs.get(&1).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Alice tx3 should have committed okay" + ); + + // TX0 -> Bob's initial lockup in PoX 2 + assert!( + match bob_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Bob tx0 should have committed okay" + ); + + // TX1 -> Bob's attempt to lock again in PoX 1 -- this one should fail + // because PoX 1 is now defunct. Checked via the tx receipt. 
+ assert_eq!( + bob_txs.get(&1).unwrap().result, + Value::err_none(), + "Bob tx1 should have resulted in a runtime error" + ); + + // TX0 -> Charlie's delegation in PoX 2. This tx just checks that the + // initialization code tracks costs in txs that occur after the + // initialization code (which uses a free tracker). + assert!( + match charlie_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Charlie tx0 should have committed okay" + ); +} + +#[test] +fn pox_auto_unlock_ab() { + pox_auto_unlock(true) +} + +#[test] +fn pox_auto_unlock_ba() { + pox_auto_unlock(false) +} + +/// In this test case, two Stackers, Alice and Bob stack and interact with the +/// PoX v1 contract and PoX v2 contract across the epoch transition, and then again +/// in PoX v3. +/// +/// Alice: stacks via PoX v1 for 4 cycles. The third of these cycles occurs after +/// the PoX v1 -> v2 transition, and so Alice gets "early unlocked". +/// After the early unlock, Alice re-stacks in PoX v2 +/// Bob: stacks via PoX v2 for 6 cycles. He attempted to stack via PoX v1 as well, +/// but is forbidden because he has already placed an account lock via PoX v2. +/// +/// Note: this test is symmetric over the order of alice and bob's stacking calls. +/// when alice goes first, the auto-unlock code doesn't need to perform a "move" +/// when bob goes first, the auto-unlock code does need to perform a "move" +fn pox_auto_unlock(alice_first: bool) { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + eprintln!("First v2 cycle = {}", first_v2_cycle); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + &format!("{}-{}", function_name!(), alice_first), + 7102 + if alice_first { 0 } else { 20 }, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + + let mut coinbase_nonce = 0; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. 
+ // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10, and 11 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_2_lockup( + &alice, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + 1 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let txs = if alice_first { + [alice_lockup, bob_lockup] + } else { + [bob_lockup, alice_lockup] + }; + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice and bob + // for the pox-2 cycles + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE) + 1; + + // but first, check that bob has locked tokens at (height_target + 1) + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], 
&mut coinbase_nonce); + } + + let first_auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // check that the "raw" reward sets for all cycles just contains entries for alice + // at the cycle start + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // now check that bob has an unlock height of `height_target` + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), height_target); + + // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block + assert_eq!(bob_bal.amount_locked(), 10000000000); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob is fully unlocked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), 0); + assert_eq!(bob_bal.amount_locked(), 0); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + assert!( + get_stacking_state_pox_2( + &mut peer, + 
&latest_block, + &key_to_stacks_addr(&bob).to_account_principal() + ) + .is_none(), + "Bob should not have a stacking-state entry" + ); + + let alice_state = get_stacking_state_pox_2( + &mut peer, + &latest_block, + &key_to_stacks_addr(&alice).to_account_principal(), + ) + .expect("Alice should have stacking-state entry") + .expect_tuple(); + let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // now, lets check behavior in Epochs 2.2-2.4, with pox-3 auto unlock tests + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[4].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + // check that alice is unlocked now + peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); + + // produce blocks until epoch 2.4 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // repeat the lockups as before, so we can test the pox-3 auto unlock behavior + let tip = get_tip(peer.sortdb.as_ref()); + + let alice_lockup = make_pox_3_lockup( + &alice, + 1, + 1024 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + + let bob_lockup = make_pox_3_lockup( + &bob, + 1, + 1 * POX_THRESHOLD_STEPS_USTX, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 6, + tip.block_height, + ); + + let txs = if alice_first { + [alice_lockup, bob_lockup] + } else { + [bob_lockup, alice_lockup] + }; + latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + 
+ // check that the "raw" reward set will contain entries for alice and bob + // for the pox-3 cycles + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // we'll produce blocks until the next reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; + let second_auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; + + // but first, check that bob has locked tokens at (height_target + 1) + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles just contains entries for alice + // at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + } + + // now check that bob has an unlock height of `height_target` + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + 
assert_eq!(bob_bal.unlock_height(), height_target); + // but bob's still locked at (height_target): the unlock is accelerated to the "next" burn block + assert_eq!(bob_bal.amount_locked(), 10000000000); + + // check that the total reward cycle amounts have decremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + } + + // check that bob's stacking-state is gone and alice's stacking-state is correct + assert!( + get_stacking_state_pox( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + POX_3_NAME, + ) + .is_none(), + "Bob should not have a stacking-state entry" + ); + + let alice_state = get_stacking_state_pox( + &mut peer, + &latest_block, + &key_to_stacks_addr(&alice).to_account_principal(), + POX_3_NAME, + ) + .expect("Alice should have stacking-state entry") + .expect_tuple(); + let reward_indexes_str = format!("{}", alice_state.get("reward-set-indexes").unwrap()); + assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); + + // check that bob is fully unlocked at next block + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let bob_bal = get_stx_account_at( + &mut peer, + &latest_block, + &key_to_stacks_addr(&bob).to_account_principal(), + ); + assert_eq!(bob_bal.unlock_height(), 0); + assert_eq!(bob_bal.amount_locked(), 0); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut coinbase_txs = vec![]; + + for b in blocks.into_iter() { + for (i, r) in b.receipts.into_iter().enumerate() { + if i == 0 { + coinbase_txs.push(r); + continue; + } + match r.transaction { + TransactionOrigin::Stacks(ref t) => { + let addr = t.auth.origin().address_testnet(); + if addr == 
alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + _ => {} + } + } + } + + assert_eq!(alice_txs.len(), 2); + assert_eq!(bob_txs.len(), 2); + + // TX0 -> Bob's initial lockup in PoX 2 + assert!( + match bob_txs.get(&0).unwrap().result { + Value::Response(ref r) => r.committed, + _ => false, + }, + "Bob tx0 should have committed okay" + ); + + assert_eq!(coinbase_txs.len(), 38); + + info!( + "Expected first auto-unlock coinbase index: {}", + first_auto_unlock_coinbase + ); + + // Check that the event produced by "handle-unlock" has a well-formed print event + // and that this event is included as part of the coinbase tx + for unlock_coinbase_index in [first_auto_unlock_coinbase, second_auto_unlock_coinbase] { + // expect the unlock to occur 1 block after the handle-unlock method was invoked. + let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; + let expected_cycle = pox_constants + .block_height_to_reward_cycle(0, expected_unlock_height) + .unwrap(); + + let auto_unlock_tx = coinbase_txs[unlock_coinbase_index as usize].events[0].clone(); + let pox_addr_val = generate_pox_clarity_value("60c59ab11f7063ef44c16d3dc856f76bbb915eba"); + let auto_unlock_op_data = HashMap::from([ + ("first-cycle-locked", Value::UInt(expected_cycle.into())), + ("first-unlocked-cycle", Value::UInt(expected_cycle.into())), + ("pox-addr", pox_addr_val), + ]); + let common_data = PoxPrintFields { + op_name: "handle-unlock".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(10230000000000), + locked: Value::UInt(10000000000), + burnchain_unlock_height: Value::UInt(expected_unlock_height.into()), + }; + check_pox_print_event(&auto_unlock_tx, common_data, auto_unlock_op_data); + } +} + +/// In this test case, Alice delegates to Bob. 
+/// Bob stacks Alice's funds via PoX v2 for 6 cycles. In the third cycle, +/// Bob increases Alice's stacking amount. +/// +#[test] +fn delegate_stack_increase() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7103, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let num_blocks = 35; + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob = keys.pop().unwrap(); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + let bob_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, bob_address.bytes.clone()); + let mut alice_nonce = 0; + let mut bob_nonce = 0; + + let alice_delegation_amount = 1023 * POX_THRESHOLD_STEPS_USTX; + let alice_first_lock_amount = 512 * POX_THRESHOLD_STEPS_USTX; + + let mut coinbase_nonce = 0; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + peer.tenure_with_txs(&[], &mut 
coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. + let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx + let alice_delegation_1 = make_pox_2_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + + let alice_delegation_pox_2_nonce = alice_nonce; + alice_nonce += 1; + + let delegate_stack_tx = make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + + bob_nonce += 1; + + let mut latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx], + &mut coinbase_nonce, + ); + + let expected_pox_2_unlock_ht = + burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + + // check that the partial stacking state contains entries for bob + for cycle_number in EXPECTED_FIRST_V2_CYCLE..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 1st reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, 
&alice_principal); + + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + + // check that the partial stacking state contains entries for bob + for cycle_number in EXPECTED_FIRST_V2_CYCLE..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let mut txs_to_submit = vec![]; + + let fail_direct_increase_delegation = alice_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(1)], + )); + alice_nonce += 1; + + let fail_delegate_too_much_locked = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount + 1), + ], + )); + bob_nonce += 1; + + let fail_invalid_amount = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(0), + ], + )); + bob_nonce += 1; + + let fail_insufficient_funds = bob_nonce; + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_bal.amount_unlocked() + 1), + ], + )); + bob_nonce += 1; + + txs_to_submit.push(make_pox_2_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount), + ], + )); + let bob_delegate_increase_pox_2_nonce = bob_nonce; + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, 
&alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_delegation_amount); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + + // check that the partial stacking state contains entries for bob and they've incremented correctly + for cycle_number in (EXPECTED_FIRST_V2_CYCLE)..(EXPECTED_FIRST_V2_CYCLE + 2) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, alice_first_lock_amount); + } + + for cycle_number in (EXPECTED_FIRST_V2_CYCLE + 2)..(EXPECTED_FIRST_V2_CYCLE + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_2_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount,); + } + + // okay, now let's progress through epochs 2.2-2.4, and perform the delegation tests + // on pox-3 + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_locked(), + 0, + ); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_locked(), + 0, + ); + // this block should unlock alice's balance + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + 0, + 
); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_locked(), + 0, + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &bob_principal).amount_unlocked(), + 1024 * POX_THRESHOLD_STEPS_USTX + ); + + // Roll to Epoch-2.4 and re-do the above tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx + let alice_delegation_1 = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + let alice_delegation_pox_3_nonce = alice_nonce; + alice_nonce += 1; + + let delegate_stack_tx = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + + bob_nonce += 1; + + latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx], + &mut coinbase_nonce, + ); + + let expected_pox_3_unlock_ht = burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * 
POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 3rd reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + let bob_bal = get_stx_account_at(&mut peer, &latest_block, &bob_principal); + assert_eq!(bob_bal.amount_locked(), 0); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let mut txs_to_submit = vec![]; + + let pox_3_fail_direct_increase_delegation = alice_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(1)], + )); + alice_nonce += 1; + + let pox_3_fail_delegate_too_much_locked = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount + 1), + ], + )); + bob_nonce += 1; + + let pox_3_fail_invalid_amount = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(0), + ], + )); + bob_nonce += 1; + + let pox_3_fail_insufficient_funds = bob_nonce; + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + 
alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_bal.amount_unlocked() + 1), + ], + )); + bob_nonce += 1; + + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(alice_delegation_amount - alice_first_lock_amount), + ], + )); + let bob_delegate_increase_pox_3_nonce = bob_nonce; + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).unlock_height(), + expected_pox_3_unlock_ht, + ); + + // check that the partial stacking state contains entries for bob and they've incremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!( + partial_stacked, + alice_first_lock_amount, + "Unexpected partially stacked amount in cycle: {} = {} + {}", + cycle_number, + first_v3_cycle, + first_v3_cycle - cycle_number, + ); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount); + } + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + 
alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len() as u64, 4); + assert_eq!(bob_txs.len() as u64, 10); + + // transaction should fail because Alice cannot increase her own stacking amount while delegating + assert_eq!( + &alice_txs[&fail_direct_increase_delegation] + .result + .to_string(), + "(err 20)" + ); + + // transaction should fail because Alice did not delegate enough funds to Bob + assert_eq!( + &bob_txs[&fail_delegate_too_much_locked].result.to_string(), + "(err 22)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &bob_txs[&fail_insufficient_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &bob_txs[&fail_invalid_amount].result.to_string(), + "(err 18)" + ); + + assert_eq!( + &alice_txs[&pox_3_fail_direct_increase_delegation] + .result + .to_string(), + "(err 30)" + ); + + // transaction should fail because Alice did not delegate enough funds to Bob + assert_eq!( + &bob_txs[&pox_3_fail_delegate_too_much_locked] + .result + .to_string(), + "(err 22)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &bob_txs[&pox_3_fail_insufficient_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &bob_txs[&pox_3_fail_invalid_amount].result.to_string(), + "(err 18)" + ); + + for delegation_nonce in [alice_delegation_pox_2_nonce, alice_delegation_pox_3_nonce] { + let delegate_stx_tx = &alice_txs.get(&delegation_nonce).unwrap().clone().events[0]; + let delegate_stx_op_data = HashMap::from([ + ("pox-addr", Value::none()), + ("amount-ustx", Value::UInt(10230000000000)), + ("unlock-burn-height", Value::none()), + ( + "delegate-to", + Value::Principal( + 
StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + .to_account_principal(), + ), + ), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stx".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(10240000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(delegate_stx_tx, common_data, delegate_stx_op_data); + } + + // Check that the call to `delegate-stack-increase` has a well-formed print event. + for (unlock_height, del_increase_nonce) in [ + (expected_pox_2_unlock_ht, bob_delegate_increase_pox_2_nonce), + (expected_pox_3_unlock_ht, bob_delegate_increase_pox_3_nonce), + ] { + let delegate_stack_increase_tx = + &bob_txs.get(&del_increase_nonce).unwrap().clone().events[0]; + let pox_addr_val = generate_pox_clarity_value("60c59ab11f7063ef44c16d3dc856f76bbb915eba"); + let delegate_op_data = HashMap::from([ + ("pox-addr", pox_addr_val), + ("increase-by", Value::UInt(5110000000000)), + ("total-locked", Value::UInt(10230000000000)), + ( + "delegator", + Value::Principal( + StacksAddress::from_string("ST1GCB6NH3XR67VT4R5PKVJ2PYXNVQ4AYQATXNP4P") + .unwrap() + .to_account_principal(), + ), + ), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-increase".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(5120000000000), + locked: Value::UInt(5120000000000), + burnchain_unlock_height: Value::UInt(unlock_height.into()), + }; + check_pox_print_event(delegate_stack_increase_tx, common_data, delegate_op_data); + } +} + +#[test] +fn stack_increase() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7105, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let num_blocks = 35; + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let mut alice_nonce = 0; + + let mut coinbase_nonce = 0; + + let first_lockup_amt = 512 * POX_THRESHOLD_STEPS_USTX; + let total_balance = 1024 * POX_THRESHOLD_STEPS_USTX; + let increase_amt = total_balance - first_lockup_amt; + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 2 should now exist. 
+ let tip = get_tip(peer.sortdb.as_ref()); + + // submit an increase: this should fail, because Alice is not yet locked + let fail_no_lock_tx = alice_nonce; + let alice_increase = make_pox_2_increase(&alice, alice_nonce, increase_amt); + alice_nonce += 1; + + let alice_lockup = make_pox_2_lockup( + &alice, + alice_nonce, + first_lockup_amt, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + alice_nonce += 1; + + let mut latest_block = + peer.tenure_with_txs(&[alice_increase, alice_lockup], &mut coinbase_nonce); + + let expected_pox_2_unlock_ht = + burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + // we'll produce blocks until the 1st reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(EXPECTED_FIRST_V2_CYCLE + 1) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward sets for all cycles contains entries for alice + for 
cycle_number in EXPECTED_FIRST_V2_CYCLE..first_v3_cycle { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + let mut txs_to_submit = vec![]; + let fail_bad_amount = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, 0)); + alice_nonce += 1; + + // this stack-increase tx should work + let pox_2_success_increase = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, increase_amt)); + alice_nonce += 1; + + // increase by an amount we don't have! + let fail_not_enough_funds = alice_nonce; + txs_to_submit.push(make_pox_2_increase(&alice, alice_nonce, 1)); + alice_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); + assert_eq!(alice_bal.unlock_height(), expected_pox_2_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the total reward cycle amounts have incremented correctly + for cycle_number in first_v2_cycle..(first_v2_cycle + 2) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + assert!( + 
first_v2_cycle + 2 < first_v3_cycle, + "Make sure that we can actually test a stack-increase in pox-2 before pox-3 activates" + ); + + for cycle_number in (first_v2_cycle + 2)..first_v3_cycle { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt + increase_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_lockup_amt + increase_amt, + ); + } + + // Roll to Epoch-2.4 and re-do the above tests + // okay, now let's progress through epochs 2.2-2.4, and perform the delegation tests + // on pox-3 + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should always include this half lockup + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + first_lockup_amt + increase_amt, + ); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + first_lockup_amt + increase_amt, + ); + + // this block should unlock alice's balance + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + 0, + ); + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_unlocked(), + total_balance, + ); + + // Roll to Epoch-2.4 and re-do the above stack-increase tests + while 
get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 3 should now exist. + let tip = get_tip(peer.sortdb.as_ref()); + + // submit an increase: this should fail, because Alice is not yet locked + let pox_3_fail_no_lock_tx = alice_nonce; + let alice_increase = make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(increase_amt)], + ); + alice_nonce += 1; + + let alice_lockup = make_pox_3_lockup( + &alice, + alice_nonce, + first_lockup_amt, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + tip.block_height, + ); + alice_nonce += 1; + + let mut latest_block = + peer.tenure_with_txs(&[alice_increase, alice_lockup], &mut coinbase_nonce); + + let expected_pox_3_unlock_ht = burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + // we'll produce blocks until the 3rd reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 
1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + let mut txs_to_submit = vec![]; + let pox_3_fail_bad_amount = alice_nonce; + let bad_amount_tx = + make_pox_3_contract_call(&alice, alice_nonce, "stack-increase", vec![Value::UInt(0)]); + txs_to_submit.push(bad_amount_tx); + alice_nonce += 1; + + // this stack-increase tx should work + let pox_3_success_increase = alice_nonce; + let good_amount_tx = make_pox_3_contract_call( + &alice, + alice_nonce, + "stack-increase", + vec![Value::UInt(increase_amt)], + ); + txs_to_submit.push(good_amount_tx); + alice_nonce += 1; + + // increase by an amount we don't have! 
+ let pox_3_fail_not_enough_funds = alice_nonce; + let not_enough_tx = + make_pox_3_contract_call(&alice, alice_nonce, "stack-increase", vec![Value::UInt(1)]); + txs_to_submit.push(not_enough_tx); + alice_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), first_lockup_amt + increase_amt,); + assert_eq!(alice_bal.unlock_height(), expected_pox_3_unlock_ht); + assert_eq!(alice_bal.get_total_balance(), total_balance,); + + // check that the total reward cycle amounts have incremented correctly + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, first_lockup_amt,); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 6) { + assert_eq!( + get_reward_cycle_total(&mut peer, &latest_block, cycle_number), + first_lockup_amt + increase_amt, + ); + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_lockup_amt + increase_amt, + ); + } + + // now let's check some tx receipts + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in 
b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len() as u64, alice_nonce); + + // transaction should fail because lock isn't applied + assert_eq!(&alice_txs[&fail_no_lock_tx].result.to_string(), "(err 27)"); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &alice_txs[&fail_not_enough_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!(&alice_txs[&fail_bad_amount].result.to_string(), "(err 18)"); + + // transaction should fail because lock isn't applied + assert_eq!( + &alice_txs[&pox_3_fail_no_lock_tx].result.to_string(), + "(err 27)" + ); + + // transaction should fail because Alice doesn't have enough funds + assert_eq!( + &alice_txs[&pox_3_fail_not_enough_funds].result.to_string(), + "(err 1)" + ); + + // transaction should fail because the amount supplied is invalid (i.e., 0) + assert_eq!( + &alice_txs[&pox_3_fail_bad_amount].result.to_string(), + "(err 18)" + ); + + // Check that the call to `stack-increase` has a well-formed print event. 
+ for (increase_nonce, unlock_height) in [ + (pox_2_success_increase, expected_pox_2_unlock_ht), + (pox_3_success_increase, expected_pox_3_unlock_ht), + ] { + let stack_increase_tx = &alice_txs.get(&increase_nonce).unwrap().clone().events[0]; + let pox_addr_val = generate_pox_clarity_value("ae1593226f85e49a7eaff5b633ff687695438cc9"); + let stack_op_data = HashMap::from([ + ("increase-by", Value::UInt(5120000000000)), + ("total-locked", Value::UInt(10240000000000)), + ("pox-addr", pox_addr_val), + ]); + let common_data = PoxPrintFields { + op_name: "stack-increase".to_string(), + stacker: Value::Principal( + StacksAddress::from_string("ST2Q1B4S2DY2Y96KYNZTVCCZZD1V9AGWCS5MFXM4C") + .unwrap() + .to_account_principal(), + ), + balance: Value::UInt(5120000000000), + locked: Value::UInt(5120000000000), + burnchain_unlock_height: Value::UInt(unlock_height.into()), + }; + check_pox_print_event(stack_increase_tx, common_data, stack_op_data); + } +} + +#[test] +fn pox_extend_transition() { + let EXPECTED_FIRST_V2_CYCLE = 8; + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + assert_eq!(first_v2_cycle, EXPECTED_FIRST_V2_CYCLE); + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7110, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = + Some((EXPECTED_FIRST_V2_CYCLE, EXPECTED_FIRST_V2_CYCLE + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + + let EXPECTED_ALICE_FIRST_REWARD_CYCLE = 6; + let mut coinbase_nonce = 0; + + let INITIAL_BALANCE = 1024 * POX_THRESHOLD_STEPS_USTX; + let ALICE_LOCKUP = 1024 * POX_THRESHOLD_STEPS_USTX; + let BOB_LOCKUP = 512 * POX_THRESHOLD_STEPS_USTX; + + // these checks should pass between Alice's first reward cycle, + // and the start of V2 reward cycles + let alice_rewards_to_v2_start_checks = |tip_index_block, peer: &mut TestPeer| { + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + ( + c.get_stacking_minimum(sortdb, 
&tip_index_block).unwrap(), + get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), + c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) + .unwrap(), + ) + }); + + assert!( + cur_reward_cycle >= EXPECTED_ALICE_FIRST_REWARD_CYCLE + && cur_reward_cycle < first_v2_cycle as u128 + ); + // Alice is the only Stacker, so check that. + let (amount_ustx, pox_addr, lock_period, first_reward_cycle) = + get_stacker_info(peer, &key_to_stacks_addr(&alice).into()).unwrap(); + eprintln!( + "\nAlice: {} uSTX stacked for {} cycle(s); addr is {:?}; first reward cycle is {}\n", + amount_ustx, lock_period, &pox_addr, first_reward_cycle + ); + + // one reward address, and it's Alice's + // either way, there's a single reward address + assert_eq!(reward_addrs.len(), 1); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[0].1, ALICE_LOCKUP); + }; + + // these checks should pass after the start of V2 reward cycles + let v2_rewards_checks = |tip_index_block, peer: &mut TestPeer| { + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap() as u128; + let (min_ustx, reward_addrs, total_stacked) = with_sortdb(peer, |ref mut c, ref sortdb| { + ( + c.get_stacking_minimum(sortdb, &tip_index_block).unwrap(), + get_reward_addresses_with_par_tip(c, &burnchain, sortdb, &tip_index_block).unwrap(), + c.test_get_total_ustx_stacked(sortdb, &tip_index_block, cur_reward_cycle) + .unwrap(), + ) + }); + + eprintln!( + "reward_cycle = {}, reward_addrs = {}, total_stacked = {}", + cur_reward_cycle, + reward_addrs.len(), + total_stacked + ); + + assert!(cur_reward_cycle >= first_v2_cycle as u128); + // v2 reward cycles have begun, so reward addrs should be read from PoX2 
which is Bob + Alice + assert_eq!(reward_addrs.len(), 2); + assert_eq!( + (reward_addrs[0].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[0].0).hash160(), + key_to_stacks_addr(&bob).bytes + ); + assert_eq!(reward_addrs[0].1, BOB_LOCKUP); + + assert_eq!( + (reward_addrs[1].0).version(), + AddressHashMode::SerializeP2PKH as u8 + ); + assert_eq!( + (reward_addrs[1].0).hash160(), + key_to_stacks_addr(&alice).bytes + ); + assert_eq!(reward_addrs[1].1, ALICE_LOCKUP); + }; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + let alice_account = get_account(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_account.stx_balance.amount_unlocked(), INITIAL_BALANCE); + assert_eq!(alice_account.stx_balance.amount_locked(), 0); + assert_eq!(alice_account.stx_balance.unlock_height(), 0); + + // next tenure include Alice's lockup + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_lockup( + &alice, + 0, + ALICE_LOCKUP, + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + 4, + tip.block_height, + ); + + let tip_index_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check the stacking minimum + let total_liquid_ustx = get_liquid_ustx(&mut peer); + let min_ustx = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + chainstate.get_stacking_minimum(sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!( + min_ustx, + total_liquid_ustx / POX_TESTNET_STACKING_THRESHOLD_25 + ); + + // no reward addresses + let reward_addrs = with_sortdb(&mut peer, |ref mut chainstate, ref sortdb| { + get_reward_addresses_with_par_tip(chainstate, &burnchain, sortdb, &tip_index_block) + }) + .unwrap(); + assert_eq!(reward_addrs.len(), 0); + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &tip_index_block); + let 
alice_first_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); + + assert_eq!( + alice_first_reward_cycle as u128, + EXPECTED_ALICE_FIRST_REWARD_CYCLE + ); + let height_target = burnchain.reward_cycle_to_block_height(alice_first_reward_cycle) + 1; + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // produce blocks until epoch 2.1 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + } + + // in the next tenure, PoX 2 should now exist. + // Lets have Bob lock up for v2 + // this will lock for cycles 8, 9, 10 + // the first v2 cycle will be 8 + let tip = get_tip(peer.sortdb.as_ref()); + + let bob_lockup = make_pox_2_lockup( + &bob, + 0, + BOB_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 3, + tip.block_height, + ); + + // Alice _will_ auto-unlock: she can stack-extend in PoX v2 + let alice_lockup = make_pox_2_extend( + &alice, + 1, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + ); + + latest_block = peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + + // Extend bob's lockup via `stack-extend` for 1 more cycle + let bob_extend = make_pox_2_extend( + &bob, + 1, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 1, + ); + + latest_block = peer.tenure_with_txs(&[bob_extend], &mut coinbase_nonce); + + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + + // produce blocks until 
the v2 reward cycles start + let height_target = burnchain.reward_cycle_to_block_height(first_v2_cycle) - 1; + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // alice is still locked, balance should be 0 + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + + alice_rewards_to_v2_start_checks(latest_block, &mut peer); + } + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + v2_rewards_checks(latest_block, &mut peer); + + // Roll to Epoch-2.4 and re-do the above tests + + // roll the chain forward until just before Epoch-2.2 + while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // at this point, alice's balance should be locked, and so should bob's + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // this block is mined in epoch-2.2 + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + let bob_balance = get_balance(&mut peer, &key_to_stacks_addr(&bob).into()); + assert_eq!(bob_balance, 512 * POX_THRESHOLD_STEPS_USTX); + + // this block should unlock alice and bob's balance + + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_account = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + let bob_account = get_stx_account_at(&mut peer, &latest_block, &bob_principal); + assert_eq!(alice_account.amount_locked(), 0); + assert_eq!(alice_account.amount_unlocked(), INITIAL_BALANCE); + assert_eq!(bob_account.amount_locked(), 0); + 
assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); + + // Roll to Epoch-2.4 and re-do the above stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let alice_lockup = make_pox_3_lockup( + &alice, + 2, + ALICE_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 4, + tip.block_height, + ); + let alice_pox_3_lock_nonce = 2; + let alice_first_pox_3_unlock_height = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + let alice_pox_3_start_burn_height = tip.block_height; + + latest_block = peer.tenure_with_txs(&[alice_lockup], &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + // check the first reward cycle when Alice's tokens get stacked + let tip_burn_block_height = get_par_burn_block_height(peer.chainstate(), &latest_block); + let alice_first_v3_reward_cycle = 1 + burnchain + .block_height_to_reward_cycle(tip_burn_block_height) + .unwrap(); + + let height_target = burnchain.reward_cycle_to_block_height(alice_first_v3_reward_cycle) + 1; + + // alice locked, so balance should be 0 + let alice_balance = get_balance(&mut peer, &alice_principal); + assert_eq!(alice_balance, 0); + + // advance to the first v3 reward cycle + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = 
peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let bob_lockup = make_pox_3_lockup( + &bob, + 2, + BOB_LOCKUP, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&bob).bytes, + ), + 3, + tip.block_height, + ); + + // Alice can stack-extend in PoX v2 + let alice_lockup = make_pox_3_extend( + &alice, + 3, + PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + key_to_stacks_addr(&alice).bytes, + ), + 6, + ); + + let alice_pox_3_extend_nonce = 3; + let alice_extend_pox_3_unlock_height = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 10) - 1; + + latest_block = peer.tenure_with_txs(&[bob_lockup, alice_lockup], &mut coinbase_nonce); + + // check that the "raw" reward set will contain entries for alice at the cycle start + for cycle_number in first_v3_cycle..(first_v3_cycle + 1) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + for cycle_number in (first_v3_cycle + 1)..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 2); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[1].amount_stacked, ALICE_LOCKUP,); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&bob).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, BOB_LOCKUP,); + } + + for cycle_number in (first_v3_cycle + 4)..(first_v3_cycle + 10) { + let 
cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&alice).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, ALICE_LOCKUP,); + } + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), 4); + assert_eq!(bob_txs.len(), 3); + + for tx in alice_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Alice txs should all have committed okay" + ); + } + + for tx in bob_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Bob txs should all have committed okay" + ); + } + + // Check that the call to `stack-stx` has a well-formed print event. 
+ let stack_tx = &alice_txs + .get(&alice_pox_3_lock_nonce) + .unwrap() + .clone() + .events[0]; + let pox_addr_val = generate_pox_clarity_value("ae1593226f85e49a7eaff5b633ff687695438cc9"); + let stack_op_data = HashMap::from([ + ("lock-amount", Value::UInt(ALICE_LOCKUP)), + ( + "unlock-burn-height", + Value::UInt(alice_first_pox_3_unlock_height.into()), + ), + ( + "start-burn-height", + Value::UInt(alice_pox_3_start_burn_height.into()), + ), + ("pox-addr", pox_addr_val.clone()), + ("lock-period", Value::UInt(4)), + ]); + let common_data = PoxPrintFields { + op_name: "stack-stx".to_string(), + stacker: Value::Principal(alice_principal.clone()), + balance: Value::UInt(10240000000000), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(stack_tx, common_data, stack_op_data); + + // Check that the call to `stack-extend` has a well-formed print event. + let stack_extend_tx = &alice_txs + .get(&alice_pox_3_extend_nonce) + .unwrap() + .clone() + .events[0]; + let stack_ext_op_data = HashMap::from([ + ("extend-count", Value::UInt(6)), + ("pox-addr", pox_addr_val), + ( + "unlock-burn-height", + Value::UInt(alice_extend_pox_3_unlock_height.into()), + ), + ]); + let common_data = PoxPrintFields { + op_name: "stack-extend".to_string(), + stacker: Value::Principal(alice_principal.clone()), + balance: Value::UInt(0), + locked: Value::UInt(ALICE_LOCKUP), + burnchain_unlock_height: Value::UInt(alice_first_pox_3_unlock_height.into()), + }; + check_pox_print_event(stack_extend_tx, common_data, stack_ext_op_data); +} + +#[test] +fn delegate_extend_pox_3() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. 
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7114, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + + let mut coinbase_nonce = 0; + + let INITIAL_BALANCE = 1024 * POX_THRESHOLD_STEPS_USTX; + let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; + + // our "tenure counter" is now at 0 + let tip = get_tip(peer.sortdb.as_ref()); + assert_eq!(tip.block_height, 0 + EMPTY_SORTITIONS as u64); + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + // in the next tenure, PoX 3 should now exist. 
+ // charlie will lock bob and alice through the delegation interface + let tip = get_tip(peer.sortdb.as_ref()); + + let mut alice_nonce = 0; + let mut bob_nonce = 0; + let mut charlie_nonce = 0; + + let bob_delegate_tx = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stx", + vec![ + Value::UInt(2048 * POX_THRESHOLD_STEPS_USTX), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::none(), + ], + ); + bob_nonce += 1; + + let alice_delegate_tx = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(2048 * POX_THRESHOLD_STEPS_USTX), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::none(), + ], + ); + alice_nonce += 1; + + let delegate_stack_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-stx", + vec![ + PrincipalData::from(bob_address.clone()).into(), + Value::UInt(LOCKUP_AMT), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(tip.block_height as u128), + Value::UInt(3), + ], + ); + let delegate_stack_stx_nonce = charlie_nonce; + let delegate_stack_stx_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) - 1; + let delegate_stack_stx_lock_ht = tip.block_height; + charlie_nonce += 1; + + let delegate_alice_stack_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-stx", + vec![ + PrincipalData::from(alice_address.clone()).into(), + Value::UInt(LOCKUP_AMT), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + charlie_nonce += 1; + + // Charlie agg commits the first 3 cycles, but wait until delegate-extended bob to + // agg commit the 4th cycle + // aggregate commit to each cycle delegate-stack-stx locked for (cycles 6, 7, 8, 9) + let agg_commit_txs = [0, 1, 2].map(|ix| { + let tx = make_pox_3_contract_call( + &charlie, + 
charlie_nonce, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + ix), + ], + ); + charlie_nonce += 1; + tx + }); + let mut txs = vec![ + bob_delegate_tx, + alice_delegate_tx, + delegate_stack_tx, + delegate_alice_stack_tx, + ]; + + txs.extend(agg_commit_txs); + + latest_block = peer.tenure_with_txs(txs.as_slice(), &mut coinbase_nonce); + + for cycle_number in first_v3_cycle..(first_v3_cycle + 3) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&charlie).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, 2 * LOCKUP_AMT); + } + + for cycle_number in (first_v3_cycle + 3)..(first_v3_cycle + 6) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 0); + } + + let alice_principal = alice_address.clone().into(); + let bob_principal = bob_address.clone().into(); + let charlie_principal: PrincipalData = charlie_address.clone().into(); + + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. 
+ } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-3 stacking state is the next cycle, which is 12" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-3 stacking state is the next cycle, which is 12" + ); + assert_eq!(bob_lock_period, 3); + + // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle + let delegate_extend_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-extend", + vec![ + PrincipalData::from(bob_address.clone()).into(), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(1), + ], + ); + let delegate_stack_extend_nonce = charlie_nonce; + let delegate_stack_extend_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + charlie_nonce += 1; + + let agg_commit_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-commit", + vec![ + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(first_v3_cycle as u128 + 3), + ], + ); + let stack_agg_nonce = charlie_nonce; + let stack_agg_cycle = first_v3_cycle + 3; + let delegate_stack_extend_unlock_ht = + burnchain.reward_cycle_to_block_height(first_v3_cycle + 4) - 1; + charlie_nonce += 1; + + latest_block = peer.tenure_with_txs(&[delegate_extend_tx, agg_commit_tx], &mut coinbase_nonce); + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. 
+ } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(bob_lock_period, 4); + + for cycle_number in first_v3_cycle..(first_v3_cycle + 4) { + let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); + let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + key_to_stacks_addr(&charlie).bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[0].amount_stacked, 2 * LOCKUP_AMT); + } + + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle) + 1; + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); + assert_eq!(alice_balance, 0); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // Extend bob's lockup via `delegate-stack-extend` for 1 more cycle + // so that we can check the first-reward-cycle is correctly updated + let delegate_extend_tx = make_pox_3_contract_call( + &charlie, + charlie_nonce, + "delegate-stack-extend", + vec![ + PrincipalData::from(bob_address.clone()).into(), + make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ), + Value::UInt(3), + ], + ); + charlie_nonce += 1; + + latest_block = peer.tenure_with_txs(&[delegate_extend_tx], &mut coinbase_nonce); + let StackingStateCheckData { + first_cycle: alice_first_cycle, + lock_period: alice_lock_period, + .. 
+ } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &alice_principal, + false, + POX_3_NAME, + ); + let StackingStateCheckData { + first_cycle: bob_first_cycle, + lock_period: bob_lock_period, + .. + } = check_stacking_state_invariants( + &mut peer, + &latest_block, + &bob_principal, + false, + POX_3_NAME, + ); + + assert_eq!( + alice_first_cycle as u64, first_v3_cycle, + "Alice's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(alice_lock_period, 6); + assert_eq!( + bob_first_cycle as u64, first_v3_cycle, + "Bob's first cycle in PoX-2 stacking state is the next cycle, which is 8" + ); + assert_eq!(bob_lock_period, 7); + + // now let's check some tx receipts + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + charlie_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), alice_nonce as usize); + assert_eq!(bob_txs.len(), bob_nonce as usize); + assert_eq!(charlie_txs.len(), charlie_nonce as usize); + + for tx in alice_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Alice txs should all have committed okay" + ); + } + for tx in bob_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + false + }, + "Bob txs should all have committed okay" + ); + } + for tx in charlie_txs.iter() { + assert!( + if let Value::Response(ref r) = tx.1.result { + r.committed + } else { + 
false + }, + "Charlie txs should all have committed okay" + ); + } + + // Check that the call to `delegate-stack-stx` has a well-formed print event. + let delegate_stack_tx = &charlie_txs + .get(&delegate_stack_stx_nonce) + .unwrap() + .clone() + .events[0]; + let pox_addr_val = generate_pox_clarity_value("12d93ae7b61e5b7d905c85828d4320e7c221f433"); + let delegate_op_data = HashMap::from([ + ("lock-amount", Value::UInt(LOCKUP_AMT)), + ( + "unlock-burn-height", + Value::UInt(delegate_stack_stx_unlock_ht.into()), + ), + ( + "start-burn-height", + Value::UInt(delegate_stack_stx_lock_ht.into()), + ), + ("pox-addr", pox_addr_val.clone()), + ("lock-period", Value::UInt(3)), + ("delegator", Value::Principal(charlie_principal.clone())), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-stx".to_string(), + stacker: Value::Principal(bob_principal.clone()), + balance: Value::UInt(LOCKUP_AMT), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(delegate_stack_tx, common_data, delegate_op_data); + + // Check that the call to `delegate-stack-extend` has a well-formed print event. + let delegate_stack_extend_tx = &charlie_txs + .get(&delegate_stack_extend_nonce) + .unwrap() + .clone() + .events[0]; + let delegate_ext_op_data = HashMap::from([ + ("pox-addr", pox_addr_val.clone()), + ( + "unlock-burn-height", + Value::UInt(delegate_stack_extend_unlock_ht.into()), + ), + ("extend-count", Value::UInt(1)), + ("delegator", Value::Principal(charlie_principal.clone())), + ]); + let common_data = PoxPrintFields { + op_name: "delegate-stack-extend".to_string(), + stacker: Value::Principal(bob_principal.clone()), + balance: Value::UInt(0), + locked: Value::UInt(LOCKUP_AMT), + burnchain_unlock_height: Value::UInt(delegate_stack_stx_unlock_ht.into()), + }; + check_pox_print_event(delegate_stack_extend_tx, common_data, delegate_ext_op_data); + + // Check that the call to `stack-aggregation-commit` has a well-formed print event. 
+ let stack_agg_commit_tx = &charlie_txs.get(&stack_agg_nonce).unwrap().clone().events[0]; + let stack_agg_commit_op_data = HashMap::from([ + ("pox-addr", pox_addr_val), + ("reward-cycle", Value::UInt(stack_agg_cycle.into())), + ("amount-ustx", Value::UInt(2 * LOCKUP_AMT)), + ]); + let common_data = PoxPrintFields { + op_name: "stack-aggregation-commit".to_string(), + stacker: Value::Principal(charlie_principal.clone()), + balance: Value::UInt(LOCKUP_AMT), + locked: Value::UInt(0), + burnchain_unlock_height: Value::UInt(0), + }; + check_pox_print_event(stack_agg_commit_tx, common_data, stack_agg_commit_op_data); +} + +#[test] +fn pox_3_getters() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7115, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + let danielle = keys.pop().unwrap(); + + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + let mut coinbase_nonce = 0; + + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height 
{
+        latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce);
+    }
+
+    let tip = get_tip(peer.sortdb.as_ref());
+    let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX;
+
+    // alice locks in v3
+    let alice_lockup = make_pox_3_lockup(
+        &alice,
+        0,
+        LOCKUP_AMT,
+        PoxAddress::from_legacy(
+            AddressHashMode::SerializeP2PKH,
+            key_to_stacks_addr(&alice).bytes,
+        ),
+        4,
+        tip.block_height,
+    );
+
+    // bob delegates to charlie
+    let bob_delegate_tx = make_pox_3_contract_call(
+        &bob,
+        0,
+        "delegate-stx",
+        vec![
+            Value::UInt(LOCKUP_AMT),
+            PrincipalData::from(charlie_address.clone()).into(),
+            Value::none(),
+            Value::none(),
+        ],
+    );
+
+    // charlie calls delegate-stack-stx for bob
+    let charlie_delegate_stack_tx = make_pox_3_contract_call(
+        &charlie,
+        0,
+        "delegate-stack-stx",
+        vec![
+            PrincipalData::from(bob_address.clone()).into(),
+            Value::UInt(LOCKUP_AMT),
+            make_pox_addr(
+                AddressHashMode::SerializeP2PKH,
+                charlie_address.bytes.clone(),
+            ),
+            Value::UInt(tip.block_height as u128),
+            Value::UInt(4),
+        ],
+    );
+
+    let agg_commit_tx_1 = make_pox_3_contract_call(
+        &charlie,
+        1,
+        "stack-aggregation-commit",
+        vec![
+            make_pox_addr(
+                AddressHashMode::SerializeP2PKH,
+                charlie_address.bytes.clone(),
+            ),
+            Value::UInt(first_v3_cycle as u128),
+        ],
+    );
+
+    let agg_commit_tx_2 = make_pox_3_contract_call(
+        &charlie,
+        2,
+        "stack-aggregation-commit",
+        vec![
+            make_pox_addr(
+                AddressHashMode::SerializeP2PKH,
+                charlie_address.bytes.clone(),
+            ),
+            Value::UInt(first_v3_cycle as u128 + 1),
+        ],
+    );
+
+    let agg_commit_tx_3 = make_pox_3_contract_call(
+        &charlie,
+        3,
+        "stack-aggregation-commit",
+        vec![
+            make_pox_addr(
+                AddressHashMode::SerializeP2PKH,
+                charlie_address.bytes.clone(),
+            ),
+            Value::UInt(first_v3_cycle as u128 + 2),
+        ],
+    );
+
+    let reject_pox = make_pox_3_contract_call(&danielle, 0, "reject-pox", vec![]);
+
+    peer.tenure_with_txs(
+        &[
+            alice_lockup,
+            bob_delegate_tx,
+            charlie_delegate_stack_tx,
+            agg_commit_tx_1,
+            agg_commit_tx_2,
+            
agg_commit_tx_3, + reject_pox, + ], + &mut coinbase_nonce, + ); + + let result = eval_at_tip(&mut peer, "pox-3", &format!(" + {{ + ;; should be none + get-delegation-info-alice: (get-delegation-info '{}), + ;; should be (some $charlie_address) + get-delegation-info-bob: (get-delegation-info '{}), + ;; should be none + get-allowance-contract-callers: (get-allowance-contract-callers '{} '{}), + ;; should be 1 + get-num-reward-set-pox-addresses-current: (get-num-reward-set-pox-addresses u{}), + ;; should be 0 + get-num-reward-set-pox-addresses-future: (get-num-reward-set-pox-addresses u1000), + ;; should be 0 + get-partial-stacked-by-cycle-bob-0: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + get-partial-stacked-by-cycle-bob-1: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + get-partial-stacked-by-cycle-bob-2: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + ;; should be LOCKUP_AMT + get-partial-stacked-by-cycle-bob-3: (get-partial-stacked-by-cycle {{ version: 0x00, hashbytes: 0x{} }} u{} '{}), + ;; should be LOCKUP_AMT + get-total-pox-rejection-now: (get-total-pox-rejection u{}), + ;; should be 0 + get-total-pox-rejection-next: (get-total-pox-rejection u{}), + ;; should be 0 + get-total-pox-rejection-future: (get-total-pox-rejection u{}) + }}", &alice_address, + &bob_address, + &bob_address, &format!("{}.hello-world", &charlie_address), first_v3_cycle + 1, + &charlie_address.bytes, first_v3_cycle + 0, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 1, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 2, &charlie_address, + &charlie_address.bytes, first_v3_cycle + 3, &charlie_address, + first_v3_cycle, + first_v3_cycle + 1, + first_v3_cycle + 2, + )); + + eprintln!("{}", &result); + let data = result.expect_tuple().data_map; + + let alice_delegation_info = data + .get("get-delegation-info-alice") + .cloned() + .unwrap() + .expect_optional(); + 
assert!(alice_delegation_info.is_none()); + + let bob_delegation_info = data + .get("get-delegation-info-bob") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map; + let bob_delegation_addr = bob_delegation_info + .get("delegated-to") + .cloned() + .unwrap() + .expect_principal(); + let bob_delegation_amt = bob_delegation_info + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128(); + let bob_pox_addr_opt = bob_delegation_info + .get("pox-addr") + .cloned() + .unwrap() + .expect_optional(); + assert_eq!(bob_delegation_addr, charlie_address.to_account_principal()); + assert_eq!(bob_delegation_amt, LOCKUP_AMT as u128); + assert!(bob_pox_addr_opt.is_none()); + + let allowance = data + .get("get-allowance-contract-callers") + .cloned() + .unwrap() + .expect_optional(); + assert!(allowance.is_none()); + + let current_num_reward_addrs = data + .get("get-num-reward-set-pox-addresses-current") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(current_num_reward_addrs, 2); + + let future_num_reward_addrs = data + .get("get-num-reward-set-pox-addresses-future") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(future_num_reward_addrs, 0); + + for i in 0..3 { + let key = + ClarityName::try_from(format!("get-partial-stacked-by-cycle-bob-{}", &i)).unwrap(); + let partial_stacked = data.get(&key).cloned().unwrap().expect_optional(); + assert!(partial_stacked.is_none()); + } + let partial_stacked = data + .get("get-partial-stacked-by-cycle-bob-3") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map + .get("stacked-amount") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(partial_stacked, LOCKUP_AMT as u128); + + let rejected = data + .get("get-total-pox-rejection-now") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(rejected, LOCKUP_AMT as u128); + + let rejected = data + .get("get-total-pox-rejection-next") + .cloned() + .unwrap() + .expect_u128(); + assert_eq!(rejected, 0); + + 
let rejected = data
+        .get("get-total-pox-rejection-future")
+        .cloned()
+        .unwrap()
+        .expect_u128();
+    assert_eq!(rejected, 0);
+}
+
+fn get_burn_pox_addr_info(peer: &mut TestPeer) -> (Vec<PoxAddress>, u128) {
+    let tip = get_tip(peer.sortdb.as_ref());
+    let tip_index_block = tip.get_canonical_stacks_block_id();
+    let burn_height = tip.block_height - 1;
+    let addrs_and_payout = with_sortdb(peer, |ref mut chainstate, ref mut sortdb| {
+        let addrs = chainstate
+            .maybe_read_only_clarity_tx(&sortdb.index_conn(), &tip_index_block, |clarity_tx| {
+                clarity_tx
+                    .with_readonly_clarity_env(
+                        false,
+                        0x80000000,
+                        ClarityVersion::Clarity2,
+                        PrincipalData::Standard(StandardPrincipalData::transient()),
+                        None,
+                        LimitedCostTracker::new_free(),
+                        |env| {
+                            env.eval_read_only(
+                                &boot_code_id("pox-2", false),
+                                &format!("(get-burn-block-info? pox-addrs u{})", &burn_height),
+                            )
+                        },
+                    )
+                    .unwrap()
+            })
+            .unwrap();
+        addrs
+    })
+    .unwrap()
+    .expect_optional()
+    .expect("FATAL: expected list")
+    .expect_tuple();
+
+    let addrs = addrs_and_payout
+        .get("addrs")
+        .unwrap()
+        .to_owned()
+        .expect_list()
+        .into_iter()
+        .map(|tuple| PoxAddress::try_from_pox_tuple(false, &tuple).unwrap())
+        .collect();
+
+    let payout = addrs_and_payout
+        .get("payout")
+        .unwrap()
+        .to_owned()
+        .expect_u128();
+    (addrs, payout)
+}
+
+#[test]
+fn get_pox_addrs() {
+    // the sim environment produces 25 empty sortitions before
+    // tenures start being tracked.
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7142, + Some(epochs.clone()), + None, + ); + + assert_eq!(burnchain.pox_constants.reward_slots(), 6); + let mut coinbase_nonce = 0; + + let assert_latest_was_burn = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(commit.all_outputs_burn()); + assert!(commit.burn_fee > 0); + + let (addrs, payout) = get_burn_pox_addr_info(peer); + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + info!("Checking burn outputs at burn_height = {}", burn_height); + if peer.config.burnchain.is_in_prepare_phase(burn_height) { + assert_eq!(addrs.len(), 1); + assert_eq!(payout, 1000); + assert!(addrs[0].is_burn()); + } else { + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(addrs[0].is_burn()); + assert!(addrs[1].is_burn()); + } + }; + + let assert_latest_was_pox = |peer: &mut TestPeer| { + 
let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(!commit.all_outputs_burn()); + let commit_addrs = commit.commit_outs; + + let (addrs, payout) = get_burn_pox_addr_info(peer); + info!( + "Checking pox outputs at burn_height = {}, commit_addrs = {:?}, fetch_addrs = {:?}", + burn_height, commit_addrs, addrs + ); + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(commit_addrs.contains(&addrs[0])); + assert!(commit_addrs.contains(&addrs[1])); + addrs + }; + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + + let mut txs = vec![]; + let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let stackers: Vec<_> = keys + .iter() + .zip([ + AddressHashMode::SerializeP2PKH, + AddressHashMode::SerializeP2SH, + AddressHashMode::SerializeP2WPKH, + AddressHashMode::SerializeP2WSH, + ]) + .map(|(key, hash_mode)| { + let pox_addr = PoxAddress::from_legacy(hash_mode, key_to_stacks_addr(key).bytes); + txs.push(make_pox_3_lockup( + key, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr.clone(), + 2, + tip_height, + )); + pox_addr + }) + .collect(); + + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + + let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); + // produce blocks until 
the first reward phase that everyone should be in + while get_tip(peer.sortdb.as_ref()).block_height < target_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let reward_blocks = + burnchain.pox_constants.reward_cycle_length - burnchain.pox_constants.prepare_length; + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 
0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we're in the next reward cycle, but everyone is unstacked + for _i in 0..burnchain.pox_constants.reward_cycle_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } +} + +#[test] +fn stack_with_segwit() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v2_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .unwrap() + + 1; + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7120, + Some(epochs.clone()), + None, + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + assert_eq!(burnchain.pox_constants.reward_slots(), 6); + let mut coinbase_nonce = 0; + + let assert_latest_was_burn = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(commit.all_outputs_burn()); + assert!(commit.burn_fee > 0); + + let 
(addrs, payout) = get_burn_pox_addr_info(peer); + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + info!("Checking burn outputs at burn_height = {}", burn_height); + if peer.config.burnchain.is_in_prepare_phase(burn_height) { + assert_eq!(addrs.len(), 1); + assert_eq!(payout, 1000); + assert!(addrs[0].is_burn()); + } else { + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(addrs[0].is_burn()); + assert!(addrs[1].is_burn()); + } + }; + + let assert_latest_was_pox = |peer: &mut TestPeer| { + let tip = get_tip(peer.sortdb.as_ref()); + let tip_index_block = tip.get_canonical_stacks_block_id(); + let burn_height = tip.block_height - 1; + + let conn = peer.sortdb().conn(); + + // check the *parent* burn block, because that's what we'll be + // checking with get_burn_pox_addr_info + let mut burn_ops = + SortitionDB::get_block_commits_by_block(conn, &tip.parent_sortition_id).unwrap(); + assert_eq!(burn_ops.len(), 1); + let commit = burn_ops.pop().unwrap(); + assert!(!commit.all_outputs_burn()); + let commit_addrs = commit.commit_outs; + + let (addrs, payout) = get_burn_pox_addr_info(peer); + info!( + "Checking pox outputs at burn_height = {}, commit_addrs = {:?}, fetch_addrs = {:?}", + burn_height, commit_addrs, addrs + ); + assert_eq!(addrs.len(), 2); + assert_eq!(payout, 500); + assert!(commit_addrs.contains(&addrs[0])); + assert!(commit_addrs.contains(&addrs[1])); + addrs + }; + + // produce blocks until epoch 2.2 + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + + let mut txs = vec![]; + let tip_height = get_tip(peer.sortdb.as_ref()).block_height; + let stackers: Vec<_> = keys + .iter() + .zip([ + 
PoxAddress::Addr20(false, PoxAddressType20::P2WPKH, [0x01; 20]), + PoxAddress::Addr32(false, PoxAddressType32::P2WSH, [0x02; 32]), + PoxAddress::Addr32(false, PoxAddressType32::P2TR, [0x03; 32]), + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, Hash160([0x04; 20])), + ]) + .map(|(key, pox_addr)| { + txs.push(make_pox_3_lockup( + key, + 0, + 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr.clone(), + 2, + tip_height, + )); + pox_addr + }) + .collect(); + + let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + + let target_height = burnchain.reward_cycle_to_block_height(first_v3_cycle); + // produce blocks until the first reward phase that everyone should be in + while get_tip(peer.sortdb.as_ref()).block_height < target_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let reward_blocks = + burnchain.pox_constants.reward_cycle_length - burnchain.pox_constants.prepare_length; + let mut rewarded = HashSet::new(); + for i in 0..reward_blocks { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occuppied + if i < 2 { + assert_latest_was_pox(&mut peer) + .into_iter() + .filter(|addr| !addr.is_burn()) + .for_each(|addr| { + rewarded.insert(addr); + }); + } else { + assert_latest_was_burn(&mut peer); + } + } + + assert_eq!(rewarded.len(), 4); + for stacker in stackers.iter() { + assert!( + rewarded.contains(stacker), + "Reward cycle should include {}", + stacker + ); + } + + // now we should be back in a prepare phase + for _i in 0..burnchain.pox_constants.prepare_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } + + // now we should be in the reward phase, produce the reward blocks + let mut rewarded = 
HashSet::new();
+    for i in 0..reward_blocks {
+        latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce);
+        // only the first 2 reward blocks contain pox outputs, because there are 6 slots and only 4 are occupied
+        if i < 2 {
+            assert_latest_was_pox(&mut peer)
+                .into_iter()
+                .filter(|addr| !addr.is_burn())
+                .for_each(|addr| {
+                    rewarded.insert(addr);
+                });
+        } else {
+            assert_latest_was_burn(&mut peer);
+        }
+    }
+
+    assert_eq!(rewarded.len(), 4);
+    for stacker in stackers.iter() {
+        assert!(
+            rewarded.contains(stacker),
+            "Reward cycle should include {}",
+            stacker
+        );
+    }
+
+    // now we should be back in a prepare phase
+    for _i in 0..burnchain.pox_constants.prepare_length {
+        latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce);
+        assert_latest_was_burn(&mut peer);
+    }
+
+    // now we're in the next reward cycle, but everyone is unstacked
+    for _i in 0..burnchain.pox_constants.reward_cycle_length {
+        latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce);
+        assert_latest_was_burn(&mut peer);
+    }
+}
+
+/// In this test case, Alice delegates to Bob.
+/// Bob stacks Alice's funds via PoX v3 for 6 cycles. In the third cycle,
+/// Bob increases Alice's stacking amount by less than the stacking min.
+/// Bob is able to increase the pool's aggregate amount anyway.
+///
+#[test]
+fn stack_aggregation_increase() {
+    // the sim environment produces 25 empty sortitions before
+    // tenures start being tracked.
+ let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let observer = TestEventObserver::new(); + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7117, + Some(epochs.clone()), + Some(&observer), + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let alice = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let alice_principal = PrincipalData::from(alice_address.clone()); + let bob = keys.pop().unwrap(); + let bob_address = key_to_stacks_addr(&bob); + let bob_principal = PrincipalData::from(bob_address.clone()); + let bob_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, bob_address.bytes.clone()); + let charlie = keys.pop().unwrap(); + let charlie_address = key_to_stacks_addr(&charlie); + let charlie_pox_addr = make_pox_addr( + AddressHashMode::SerializeP2PKH, + charlie_address.bytes.clone(), + ); + let dan = keys.pop().unwrap(); + let dan_address = key_to_stacks_addr(&dan); + let dan_principal = PrincipalData::from(dan_address.clone()); + let dan_pox_addr = make_pox_addr(AddressHashMode::SerializeP2PKH, dan_address.bytes.clone()); + let alice_nonce = 0; + let mut bob_nonce = 0; + let mut charlie_nonce = 0; + let mut dan_nonce = 0; + + let alice_first_lock_amount = 512 * POX_THRESHOLD_STEPS_USTX; + let alice_delegation_amount = alice_first_lock_amount + 1; + let dan_delegation_amount = alice_first_lock_amount + 1; + let dan_stack_amount = 511 * POX_THRESHOLD_STEPS_USTX; + + let mut coinbase_nonce = 0; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], 
&mut coinbase_nonce); + + // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + + // submit delegation tx for alice + let alice_delegation_1 = make_pox_3_contract_call( + &alice, + alice_nonce, + "delegate-stx", + vec![ + Value::UInt(alice_delegation_amount), + bob_principal.clone().into(), + Value::none(), + Value::none(), + ], + ); + + // bob locks some of alice's tokens + let delegate_stack_tx_bob = make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-stx", + vec![ + alice_principal.clone().into(), + Value::UInt(alice_first_lock_amount), + bob_pox_addr.clone(), + Value::UInt(tip.block_height as u128), + Value::UInt(6), + ], + ); + bob_nonce += 1; + + // dan stacks some tokens + let stack_tx_dan = make_pox_3_lockup( + &dan, + dan_nonce, + dan_stack_amount, + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, dan_address.bytes.clone()), + 12, + tip.block_height, + ); + dan_nonce += 1; + + latest_block = peer.tenure_with_txs( + &[alice_delegation_1, delegate_stack_tx_bob, stack_tx_dan], + &mut coinbase_nonce, + ); + + // check that the partial stacking state contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + // we'll produce blocks until the 3rd reward cycle gets through the "handled start" code + // this is one block after the reward cycle starts + let height_target = burnchain.reward_cycle_to_block_height(first_v3_cycle + 3) + 1; + + while get_tip(peer.sortdb.as_ref()).block_height < height_target { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let expected_alice_unlock 
= burnchain.reward_cycle_to_block_height(first_v3_cycle + 6) - 1; + let expected_dan_unlock = burnchain.reward_cycle_to_block_height(first_v3_cycle + 12) - 1; + + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_first_lock_amount); + assert_eq!(alice_bal.unlock_height(), expected_alice_unlock); + + let dan_bal = get_stx_account_at(&mut peer, &latest_block, &dan_principal); + assert_eq!(dan_bal.amount_locked(), dan_stack_amount); + assert_eq!(dan_bal.unlock_height(), expected_dan_unlock); + + // check that the partial stacking state still contains entries for bob + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 512 * POX_THRESHOLD_STEPS_USTX); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + let mut txs_to_submit = vec![]; + + // bob locks in alice's tokens to a PoX address, + // which clears the partially-stacked state + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-commit-indexed", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + ], + )); + let bob_stack_aggregation_commit_indexed = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens in a reward cycle that's already committed (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(0), + ], + )); + let bob_err_stacking_no_such_principal = bob_nonce; + bob_nonce += 1; + + // bob locks up 1 more of alice's tokens + // takes effect in the _next_ reward cycle + 
txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "delegate-stack-increase", + vec![ + alice_principal.clone().into(), + bob_pox_addr.clone(), + Value::UInt(1), + ], + )); + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // locked up more tokens, but unlock height is unchanged + let alice_bal = get_stx_account_at(&mut peer, &latest_block, &alice_principal); + assert_eq!(alice_bal.amount_locked(), alice_delegation_amount); + assert_eq!(alice_bal.unlock_height(), expected_alice_unlock); + + // only 1 uSTX to lock in this next cycle for Alice + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cur_reward_cycle + 1, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, 1); + + for cycle_number in (cur_reward_cycle + 2)..(first_v3_cycle + 6) { + // alice has 512 * POX_THRESHOLD_STEPS_USTX partially-stacked STX in all cycles after + let partial_stacked = get_partial_stacked( + &mut peer, + &latest_block, + &bob_pox_addr, + cycle_number, + &bob_principal, + POX_3_NAME, + ); + assert_eq!(partial_stacked, alice_delegation_amount); + } + + let mut txs_to_submit = vec![]; + + // charlie tries to lock alice's additional tokens to his own PoX address (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &charlie, + charlie_nonce, + "stack-aggregation-increase", + vec![ + charlie_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let charlie_err_stacking_no_principal = charlie_nonce; + charlie_nonce += 1; + + // charlie tries to lock alice's additional tokens to bob's PoX address (should fail with + // ERR_STACKING_NO_SUCH_PRINCIPAL) + txs_to_submit.push(make_pox_3_contract_call( + &charlie, + charlie_nonce, + 
"stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let charlie_err_stacking_no_principal_2 = charlie_nonce; + charlie_nonce += 1; + + // bob tries to retcon a reward cycle lockup (should fail with ERR_STACKING_INVALID_LOCK_PERIOD) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt(cur_reward_cycle as u128), + Value::UInt(0), + ], + )); + let bob_err_stacking_invalid_lock_period = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens in a reward cycle that has no tokens stacked in it yet (should + // fail with ERR_DELEGATION_NO_REWARD_CYCLE) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 13) as u128), + Value::UInt(0), + ], + )); + let bob_err_delegation_no_reward_cycle = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens to a non-existant PoX reward address (should fail with + // ERR_DELEGATION_NO_REWARD_SLOT) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(2), + ], + )); + let bob_err_delegation_no_reward_slot = bob_nonce; + bob_nonce += 1; + + // bob tries to lock tokens to the wrong PoX address (should fail with ERR_DELEGATION_WRONG_REWARD_SLOT). + // slot 0 belongs to dan. 
+ txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(0), + ], + )); + let bob_err_delegation_wrong_reward_slot = bob_nonce; + bob_nonce += 1; + + // bob locks tokens for Alice (bob's previous stack-aggregation-commit put his PoX address in + // slot 1 for this reward cycle) + txs_to_submit.push(make_pox_3_contract_call( + &bob, + bob_nonce, + "stack-aggregation-increase", + vec![ + bob_pox_addr.clone(), + Value::UInt((cur_reward_cycle + 1) as u128), + Value::UInt(1), + ], + )); + bob_nonce += 1; + + latest_block = peer.tenure_with_txs(&txs_to_submit, &mut coinbase_nonce); + + assert_eq!( + get_stx_account_at(&mut peer, &latest_block, &alice_principal).amount_locked(), + alice_delegation_amount + ); + + // now let's check some tx receipts + + let alice_address = key_to_stacks_addr(&alice); + let blocks = observer.get_blocks(); + + let mut alice_txs = HashMap::new(); + let mut bob_txs = HashMap::new(); + let mut charlie_txs = HashMap::new(); + + for b in blocks.into_iter() { + for r in b.receipts.into_iter() { + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + charlie_txs.insert(t.auth.get_origin_nonce(), r); + } + } + } + } + + assert_eq!(alice_txs.len(), 1); + assert_eq!(bob_txs.len(), 9); + assert_eq!(charlie_txs.len(), 2); + + // bob's stack-aggregation-commit-indexed succeeded and returned the right index + assert_eq!( + &bob_txs[&bob_stack_aggregation_commit_indexed] + .result + .to_string(), + "(ok u1)" + ); + + // check bob's errors + assert_eq!( + &bob_txs[&bob_err_stacking_no_such_principal] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + 
&bob_txs[&bob_err_stacking_invalid_lock_period] + .result + .to_string(), + "(err 2)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_no_reward_cycle] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_no_reward_slot] + .result + .to_string(), + "(err 28)" + ); + assert_eq!( + &bob_txs[&bob_err_delegation_wrong_reward_slot] + .result + .to_string(), + "(err 29)" + ); + + // check charlie's errors + assert_eq!( + &charlie_txs[&charlie_err_stacking_no_principal] + .result + .to_string(), + "(err 4)" + ); + assert_eq!( + &charlie_txs[&charlie_err_stacking_no_principal_2] + .result + .to_string(), + "(err 4)" + ); +} + +/// Verify that delegate-stx validates the PoX addr, if given +#[test] +fn pox_3_delegate_stx_addr_validation() { + // the sim environment produces 25 empty sortitions before + // tenures start being tracked. + let EMPTY_SORTITIONS = 25; + + let (epochs, pox_constants) = make_test_epochs_pox(); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( + &burnchain, + function_name!(), + 7100, + Some(epochs.clone()), + None, + ); + + peer.config.check_pox_invariants = Some((first_v3_cycle, first_v3_cycle + 10)); + + let mut coinbase_nonce = 0; + let alice = keys.pop().unwrap(); + let bob = keys.pop().unwrap(); + let charlie = keys.pop().unwrap(); + let danielle = keys.pop().unwrap(); + let alice_address = key_to_stacks_addr(&alice); + let bob_address = key_to_stacks_addr(&bob); + let charlie_address = key_to_stacks_addr(&charlie); + let LOCKUP_AMT = 1024 * POX_THRESHOLD_STEPS_USTX; + + // first tenure is empty + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + 
+ // Roll to Epoch-2.4 and perform the delegate-stack-extend tests + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + } + + let tip = get_tip(peer.sortdb.as_ref()); + let cur_reward_cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); + + // alice delegates to charlie in v3 to a valid address + let alice_delegation = make_pox_3_contract_call( + &alice, + 0, + "delegate-stx", + vec![ + Value::UInt(LOCKUP_AMT), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::some(make_pox_addr( + AddressHashMode::SerializeP2PKH, + alice_address.bytes.clone(), + )) + .unwrap(), + ], + ); + + let bob_bad_pox_addr = Value::Tuple( + TupleData::from_data(vec![ + ( + ClarityName::try_from("version".to_owned()).unwrap(), + Value::buff_from_byte(0xff), + ), + ( + ClarityName::try_from("hashbytes".to_owned()).unwrap(), + Value::Sequence(SequenceData::Buffer(BuffData { + data: bob_address.bytes.as_bytes().to_vec(), + })), + ), + ]) + .unwrap(), + ); + + // bob delegates to charlie in v3 with an invalid address + let bob_delegation = make_pox_3_contract_call( + &bob, + 0, + "delegate-stx", + vec![ + Value::UInt(LOCKUP_AMT), + PrincipalData::from(charlie_address.clone()).into(), + Value::none(), + Value::some(bob_bad_pox_addr).unwrap(), + ], + ); + + peer.tenure_with_txs(&[alice_delegation, bob_delegation], &mut coinbase_nonce); + + let result = eval_at_tip( + &mut peer, + "pox-3", + &format!( + " + {{ + ;; should be (some $charlie_address) + get-delegation-info-alice: (get-delegation-info '{}), + ;; should be none + get-delegation-info-bob: (get-delegation-info '{}), + }}", + &alice_address, &bob_address, + ), + ); + + eprintln!("{}", &result); + let data = result.expect_tuple().data_map; + + // bob had an invalid PoX address + let bob_delegation_info = data + .get("get-delegation-info-bob") + .cloned() + .unwrap() + .expect_optional(); + 
assert!(bob_delegation_info.is_none()); + + // alice was valid + let alice_delegation_info = data + .get("get-delegation-info-alice") + .cloned() + .unwrap() + .expect_optional() + .unwrap() + .expect_tuple() + .data_map; + let alice_delegation_addr = alice_delegation_info + .get("delegated-to") + .cloned() + .unwrap() + .expect_principal(); + let alice_delegation_amt = alice_delegation_info + .get("amount-ustx") + .cloned() + .unwrap() + .expect_u128(); + let alice_pox_addr_opt = alice_delegation_info + .get("pox-addr") + .cloned() + .unwrap() + .expect_optional(); + assert_eq!( + alice_delegation_addr, + charlie_address.to_account_principal() + ); + assert_eq!(alice_delegation_amt, LOCKUP_AMT as u128); + assert!(alice_pox_addr_opt.is_some()); + + let alice_pox_addr = alice_pox_addr_opt.unwrap(); + + assert_eq!( + alice_pox_addr, + make_pox_addr(AddressHashMode::SerializeP2PKH, alice_address.bytes.clone(),) + ); +} diff --git a/src/chainstate/stacks/db/accounts.rs b/src/chainstate/stacks/db/accounts.rs index 8c2dc2b985..6c7e0a12fe 100644 --- a/src/chainstate/stacks/db/accounts.rs +++ b/src/chainstate/stacks/db/accounts.rs @@ -390,6 +390,127 @@ impl StacksChainState { .expect("FATAL: failed to set account nonce") } + /////////////////////// PoX-3 ///////////////////////////////// + + /// Lock up STX for PoX for a time. Does NOT touch the account nonce. 
+ pub fn pox_lock_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + lock_amount: u128, + unlock_burn_height: u64, + ) -> Result<(), Error> { + assert!(unlock_burn_height > 0); + assert!(lock_amount > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if snapshot.has_locked_tokens() { + return Err(Error::PoxAlreadyLocked); + } + if !snapshot.can_transfer(lock_amount) { + return Err(Error::PoxInsufficientBalance); + } + snapshot.lock_tokens_v3(lock_amount, unlock_burn_height); + + debug!( + "PoX v3 lock applied"; + "pox_locked_ustx" => snapshot.balance().amount_locked(), + "available_ustx" => snapshot.balance().amount_unlocked(), + "unlock_burn_height" => unlock_burn_height, + "account" => %principal, + ); + + snapshot.save(); + Ok(()) + } + + /// Extend a STX lock up for PoX for a time. Does NOT touch the account nonce. + /// Returns Ok(lock_amount) when successful + /// + /// # Errors + /// - Returns Error::PoxExtendNotLocked if this function was called on an account + /// which isn't locked. This *should* have been checked by the PoX v3 contract, + /// so this should surface in a panic. + pub fn pox_lock_extend_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + unlock_burn_height: u64, + ) -> Result<u128, Error> { + assert!(unlock_burn_height > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if !snapshot.has_locked_tokens() { + return Err(Error::PoxExtendNotLocked); + } + + snapshot.extend_lock_v3(unlock_burn_height); + + let amount_locked = snapshot.balance().amount_locked(); + + debug!( + "PoX v3 lock applied"; + "pox_locked_ustx" => amount_locked, + "available_ustx" => snapshot.balance().amount_unlocked(), + "unlock_burn_height" => unlock_burn_height, + "account" => %principal, + ); + + snapshot.save(); + Ok(amount_locked) + } + + /// Increase a STX lock up for PoX-3. Does NOT touch the account nonce. 
+ /// Returns Ok( account snapshot ) when successful + /// + /// # Errors + /// - Returns Error::PoxExtendNotLocked if this function was called on an account + /// which isn't locked. This *should* have been checked by the PoX v3 contract, + /// so this should surface in a panic. + pub fn pox_lock_increase_v3( + db: &mut ClarityDatabase, + principal: &PrincipalData, + new_total_locked: u128, + ) -> Result<STXBalance, Error> { + assert!(new_total_locked > 0); + + let mut snapshot = db.get_stx_balance_snapshot(principal); + + if !snapshot.has_locked_tokens() { + return Err(Error::PoxExtendNotLocked); + } + + let bal = snapshot.canonical_balance_repr(); + let total_amount = bal + .amount_unlocked() + .checked_add(bal.amount_locked()) + .expect("STX balance overflowed u128"); + if total_amount < new_total_locked { + return Err(Error::PoxInsufficientBalance); + } + + if bal.amount_locked() > new_total_locked { + return Err(Error::PoxInvalidIncrease); + } + + snapshot.increase_lock_v3(new_total_locked); + + let out_balance = snapshot.canonical_balance_repr(); + + debug!( + "PoX v3 lock increased"; + "pox_locked_ustx" => out_balance.amount_locked(), + "available_ustx" => out_balance.amount_unlocked(), + "unlock_burn_height" => out_balance.unlock_height(), + "account" => %principal, + ); + + snapshot.save(); + Ok(out_balance) + } + + /////////////////////// PoX-2 ///////////////////////////////// + /// Increase a STX lock up for PoX. Does NOT touch the account nonce. /// Returns Ok( account snapshot ) when successful /// @@ -511,6 +632,8 @@ impl StacksChainState { Ok(()) } + /////////////////////// PoX (first version) ///////////////////////////////// + /// Lock up STX for PoX for a time. Does NOT touch the account nonce. 
pub fn pox_lock_v1( db: &mut ClarityDatabase, diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index 8ea78a90b0..44ea2b3916 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -3704,13 +3704,18 @@ impl StacksChainState { Ok(count - to_write) } - /// Check whether or not there exists a Stacks block at or higher than a given height that is - /// unprocessed. This is used by miners to determine whether or not the block-commit they're - /// about to send is about to be invalidated - pub fn has_higher_unprocessed_blocks(conn: &DBConn, height: u64) -> Result<bool, Error> { + /// Check whether or not there exists a Stacks block at or higher + /// than a given height that is unprocessed and relatively + /// new. This is used by miners to determine whether or not the + /// block-commit they're about to send is about to be invalidated. + pub fn has_higher_unprocessed_blocks( + conn: &DBConn, + height: u64, + deadline: u64, + ) -> Result<bool, Error> { let sql = - "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1"; - let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1 AND arrival_time >= ?2"; + let args: &[&dyn ToSql] = &[&u64_to_sql(height)?, &u64_to_sql(deadline)?]; let res = conn .query_row(sql, args, |_r| Ok(())) .optional() @@ -3720,10 +3725,13 @@ impl StacksChainState { /// Get the metadata of the highest unprocessed block. 
/// The block data will not be returned - pub fn get_highest_unprocessed_block(conn: &DBConn) -> Result<Option<StagingBlock>, Error> { + pub fn get_highest_unprocessed_block( + conn: &DBConn, + deadline: u64, + ) -> Result<Option<StagingBlock>, Error> { let sql = - "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 ORDER BY height DESC LIMIT 1"; - let res = query_row(conn, sql, NO_PARAMS)?; + "SELECT * FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND arrival_time >= ?1 ORDER BY height DESC LIMIT 1"; + let res = query_row(conn, sql, &[u64_to_sql(deadline)?])?; Ok(res) } @@ -4878,21 +4886,103 @@ impl StacksChainState { receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); applied = true; } + StacksEpochId::Epoch22 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } + StacksEpochId::Epoch23 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + StacksEpochId::Epoch24 => { + receipts.push(clarity_tx.block.initialize_epoch_2_05()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } _ => { panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); } }, - StacksEpochId::Epoch2_05 => { + StacksEpochId::Epoch2_05 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch21 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + applied = true; + } + StacksEpochId::Epoch22 
=> { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } + _ => { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch21 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch22 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + applied = true; + } + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_2()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } + _ => { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch22 => match sortition_epoch.epoch_id { + StacksEpochId::Epoch23 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + applied = true; + } + StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_3()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); + applied = true; + } + _ 
=> { + panic!("Bad Stacks epoch transition; parent_epoch = {}, current_epoch = {}", &stacks_parent_epoch, &sortition_epoch.epoch_id); + } + }, + StacksEpochId::Epoch23 => { assert_eq!( sortition_epoch.epoch_id, - StacksEpochId::Epoch21, - "Should only transition from Epoch2_05 to Epoch21" + StacksEpochId::Epoch24, + "Should only transition from Epoch23 to Epoch24" ); - receipts.append(&mut clarity_tx.block.initialize_epoch_2_1()?); + receipts.append(&mut clarity_tx.block.initialize_epoch_2_4()?); applied = true; } - StacksEpochId::Epoch21 => { - panic!("No defined transition from Epoch21 forward") + StacksEpochId::Epoch24 => { + panic!("No defined transition from Epoch24 forward") } } } @@ -5265,12 +5355,14 @@ impl StacksChainState { clarity_tx .connection() .as_transaction(|tx_connection| { + let epoch = tx_connection.get_epoch(); let result = tx_connection.with_clarity_db(|db| { let block_height = Value::UInt(db.get_current_block_height().into()); let res = db.fetch_entry_unknown_descriptor( &lockup_contract_id, "lockups", &block_height, + &epoch, )?; Ok(res) })?; @@ -5479,7 +5571,10 @@ impl StacksChainState { // The DelegateStx bitcoin wire format does not exist before Epoch 2.1. Ok((stack_ops, transfer_ops, vec![])) } - StacksEpochId::Epoch21 => { + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => { StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5512,38 +5607,67 @@ impl StacksChainState { // Do not try to handle auto-unlocks on pox_reward_cycle 0 // This cannot even occur in the mainchain, because 2.1 starts much // after the 1st reward cycle, however, this could come up in mocknets or regtest. 
- if pox_reward_cycle > 1 { - // do not try to handle auto-unlocks before the reward set has been calculated (at block = 0 of cycle) - // or written to the sortition db (at block = 1 of cycle) - if Burnchain::is_before_reward_cycle( - burn_dbconn.get_burn_start_height().into(), - burn_tip_height, - burn_dbconn.get_pox_reward_cycle_length().into(), - ) { - debug!("check_and_handle_reward_start: before reward cycle"); - return Ok(vec![]); - } - let handled = clarity_tx.with_clarity_db_readonly(|clarity_db| { - Self::handled_pox_cycle_start(clarity_db, pox_reward_cycle) - }); - debug!("check_and_handle_reward_start: handled = {}", handled); + if pox_reward_cycle <= 1 { + return Ok(vec![]); + } - if !handled { - let pox_start_cycle_info = sortition_dbconn.get_pox_start_cycle_info( - parent_sortition_id, - chain_tip.burn_header_height.into(), - pox_reward_cycle, - )?; - debug!("check_and_handle_reward_start: got pox reward cycle info"); - let events = clarity_tx.block.as_free_transaction(|clarity_tx| { - Self::handle_pox_cycle_start(clarity_tx, pox_reward_cycle, pox_start_cycle_info) - })?; - debug!("check_and_handle_reward_start: handled pox cycle start"); - return Ok(events); - } + // do not try to handle auto-unlocks before the reward set has been calculated (at block = 0 of cycle) + // or written to the sortition db (at block = 1 of cycle) + if Burnchain::is_before_reward_cycle( + burn_dbconn.get_burn_start_height().into(), + burn_tip_height, + burn_dbconn.get_pox_reward_cycle_length().into(), + ) { + debug!("check_and_handle_reward_start: before reward cycle"); + return Ok(vec![]); + } + let handled = clarity_tx.with_clarity_db_readonly(|clarity_db| { + Self::handled_pox_cycle_start(clarity_db, pox_reward_cycle) + }); + debug!("check_and_handle_reward_start: handled = {}", handled); + + if handled { + // already handled this cycle, don't need to do anything + return Ok(vec![]); } - Ok(vec![]) + let active_epoch = clarity_tx.get_epoch(); + + let pox_start_cycle_info 
= sortition_dbconn.get_pox_start_cycle_info( + parent_sortition_id, + chain_tip.burn_header_height.into(), + pox_reward_cycle, + )?; + debug!("check_and_handle_reward_start: got pox reward cycle info"); + let events = clarity_tx.block.as_free_transaction(|clarity_tx| { + match active_epoch { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 => { + // prior to epoch-2.4, the semantics of this method were such that any epoch + // would invoke the `handle_pox_cycle_start_pox_2()` method. + // however, only epoch-2.1 ever actually *does* invoke this method, + // so, with some careful testing, this branch could perhaps be simplified + // such that only Epoch21 matches, and all the other ones _panic_. + // For now, I think it's better to preserve the exact prior semantics. + Self::handle_pox_cycle_start_pox_2( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ) + } + StacksEpochId::Epoch24 => Self::handle_pox_cycle_start_pox_3( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ), + } + })?; + debug!("check_and_handle_reward_start: handled pox cycle start"); + return Ok(events); } /// Called in both follower and miner block assembly paths. @@ -6975,6 +7099,7 @@ impl StacksChainState { /// unconfirmed microblock stream trailing off of it. 
pub fn will_admit_mempool_tx( &mut self, + burn_state_db: &dyn BurnStateDB, current_consensus_hash: &ConsensusHash, current_block: &BlockHeaderHash, tx: &StacksTransaction, @@ -7019,7 +7144,7 @@ impl StacksChainState { let current_tip = StacksChainState::get_parent_index_block(current_consensus_hash, current_block); - let res = match self.with_read_only_clarity_tx(&NULL_BURN_STATE_DB, ¤t_tip, |conn| { + let res = match self.with_read_only_clarity_tx(burn_state_db, ¤t_tip, |conn| { StacksChainState::can_include_tx(conn, &conf, has_microblock_pubk, tx, tx_size) }) { Some(r) => r, @@ -7039,7 +7164,7 @@ impl StacksChainState { { debug!("Transaction {} is unminable in the confirmed chain tip due to nonce {} != {}; trying the unconfirmed chain tip", &tx.txid(), mismatch_error.expected, mismatch_error.actual); - self.with_read_only_unconfirmed_clarity_tx(&NULL_BURN_STATE_DB, |conn| { + self.with_read_only_unconfirmed_clarity_tx(burn_state_db, |conn| { StacksChainState::can_include_tx( conn, &conf, @@ -7137,11 +7262,12 @@ impl StacksChainState { return Err(MemPoolRejection::BadAddressVersionByte); } - let (block_height, v1_unlock_height) = - clarity_connection.with_clarity_db_readonly(|ref mut db| { + let (block_height, v1_unlock_height, v2_unlock_height) = clarity_connection + .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height() as u64, db.get_v1_unlock_height(), + db.get_v2_unlock_height(), ) }); @@ -7150,6 +7276,7 @@ impl StacksChainState { fee as u128, block_height, v1_unlock_height, + v2_unlock_height, ) { match &tx.payload { TransactionPayload::TokenTransfer(..) 
=> { @@ -7158,9 +7285,11 @@ impl StacksChainState { _ => { return Err(MemPoolRejection::NotEnoughFunds( fee as u128, - payer - .stx_balance - .get_available_balance_at_burn_block(block_height, v1_unlock_height), + payer.stx_balance.get_available_balance_at_burn_block( + block_height, + v1_unlock_height, + v2_unlock_height, + ), )); } } @@ -7183,12 +7312,15 @@ impl StacksChainState { total_spent, block_height, v1_unlock_height, + v2_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( total_spent, - origin - .stx_balance - .get_available_balance_at_burn_block(block_height, v1_unlock_height), + origin.stx_balance.get_available_balance_at_burn_block( + block_height, + v1_unlock_height, + v2_unlock_height, + ), )); } @@ -7198,12 +7330,14 @@ impl StacksChainState { fee as u128, block_height, v1_unlock_height, + v2_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( fee as u128, payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, + v2_unlock_height, ), )); } diff --git a/src/chainstate/stacks/db/contracts.rs b/src/chainstate/stacks/db/contracts.rs index 498bcb3ecb..25584011b6 100644 --- a/src/chainstate/stacks/db/contracts.rs +++ b/src/chainstate/stacks/db/contracts.rs @@ -60,9 +60,10 @@ impl StacksChainState { contract_id: &QualifiedContractIdentifier, data_var: &str, ) -> Result, Error> { + let epoch = clarity_tx.get_epoch(); clarity_tx .with_clarity_db_readonly(|ref mut db| { - match db.lookup_variable_unknown_descriptor(contract_id, data_var) { + match db.lookup_variable_unknown_descriptor(contract_id, data_var, &epoch) { Ok(c) => Ok(Some(c)), Err(clarity_vm_error::Unchecked(CheckErrors::NoSuchDataVariable(_))) => { Ok(None) diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index 27842de535..c143816024 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -222,6 +222,9 @@ impl DBConfig { self.version == "2" || self.version == "3" || self.version == 
"4" } StacksEpochId::Epoch21 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", } } } @@ -1295,16 +1298,18 @@ impl StacksChainState { } let lockup_contract_id = boot_code_id("lockup", mainnet); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { for (block_height, schedule) in lockups_per_block.into_iter() { let key = Value::UInt(block_height.into()); - let value = Value::list_from(schedule).unwrap(); + let value = Value::cons_list(schedule, &epoch).unwrap(); db.insert_entry_unknown_descriptor( &lockup_contract_id, "lockups", key, value, + &epoch, )?; } Ok(()) @@ -1316,6 +1321,7 @@ impl StacksChainState { let bns_contract_id = boot_code_id("bns", mainnet); if let Some(get_namespaces) = boot_data.get_bulk_initial_namespaces.take() { info!("Initializing chain with namespaces"); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { let initial_namespaces = get_namespaces(); @@ -1354,7 +1360,10 @@ impl StacksChainState { assert_eq!(buckets.len(), 16); TupleData::from_data(vec![ - ("buckets".into(), Value::list_from(buckets).unwrap()), + ( + "buckets".into(), + Value::cons_list(buckets, &epoch).unwrap(), + ), ("base".into(), base), ("coeff".into(), coeff), ("nonalpha-discount".into(), nonalpha_discount), @@ -1380,6 +1389,7 @@ impl StacksChainState { "namespaces", namespace, namespace_props, + &epoch, )?; } Ok(()) @@ -1390,6 +1400,7 @@ impl StacksChainState { // BNS Names if let Some(get_names) = boot_data.get_bulk_initial_names.take() { info!("Initializing chain with names"); + let epoch = clarity.get_epoch(); clarity .with_clarity_db(|db| { let initial_names = get_names(); @@ -1445,6 +1456,7 @@ impl StacksChainState { &fqn, &owner_address, &expected_asset_type, + &epoch, )?; let registered_at = Value::UInt(0); @@ -1466,6 +1478,7 @@ 
impl StacksChainState { "name-properties", fqn.clone(), name_props, + &epoch, )?; db.insert_entry_unknown_descriptor( @@ -1473,6 +1486,7 @@ impl StacksChainState { "owner-name", Value::Principal(owner_address), fqn, + &epoch, )?; } Ok(()) diff --git a/src/chainstate/stacks/db/transactions.rs b/src/chainstate/stacks/db/transactions.rs index d5a0cc65d3..b7ff556a09 100644 --- a/src/chainstate/stacks/db/transactions.rs +++ b/src/chainstate/stacks/db/transactions.rs @@ -328,6 +328,9 @@ pub fn handle_clarity_runtime_error(error: clarity_error) -> ClarityRuntimeTxErr err_type: "short return/panic", } } + clarity_error::Interpreter(InterpreterError::Unchecked(CheckErrors::SupertypeTooLarge)) => { + ClarityRuntimeTxError::Rejectable(error) + } clarity_error::Interpreter(InterpreterError::Unchecked(check_error)) => { ClarityRuntimeTxError::AnalysisError(check_error) } @@ -424,17 +427,22 @@ impl StacksChainState { fee: u64, payer_account: StacksAccount, ) -> Result { - let (cur_burn_block_height, v1_unlock_ht) = - clarity_tx.with_clarity_db_readonly(|ref mut db| { + let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht) = clarity_tx + .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height(), db.get_v1_unlock_height(), + db.get_v2_unlock_height(), ) }); let consolidated_balance = payer_account .stx_balance - .get_available_balance_at_burn_block(cur_burn_block_height as u64, v1_unlock_ht); + .get_available_balance_at_burn_block( + cur_burn_block_height as u64, + v1_unlock_ht, + v2_unlock_ht, + ); if consolidated_balance < fee as u128 { return Err(Error::InvalidFee); @@ -1109,6 +1117,12 @@ impl StacksChainState { } } } + if let clarity_error::Analysis(err) = &other_error { + if let CheckErrors::SupertypeTooLarge = err.err { + info!("Transaction {} is problematic and should have prevented this block from being relayed", tx.txid()); + return Err(Error::ClarityError(other_error)); + } + } // this analysis isn't free -- convert to runtime error let mut 
analysis_cost = clarity_tx.cost_so_far(); analysis_cost @@ -7846,7 +7860,7 @@ pub mod test { assert_eq!( StacksChainState::get_account(&mut conn, &addr.into()) .stx_balance - .get_available_balance_at_burn_block(0, 0), + .get_available_balance_at_burn_block(0, 0, 0), (1000000000 - fee) as u128 ); @@ -8284,6 +8298,12 @@ pub mod test { fn get_v1_unlock_height(&self) -> u32 { 2 } + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } @@ -8345,6 +8365,9 @@ pub mod test { StacksEpochId::Epoch20 => self.get_stacks_epoch(0), StacksEpochId::Epoch2_05 => self.get_stacks_epoch(1), StacksEpochId::Epoch21 => self.get_stacks_epoch(2), + StacksEpochId::Epoch22 => self.get_stacks_epoch(3), + StacksEpochId::Epoch23 => self.get_stacks_epoch(4), + StacksEpochId::Epoch24 => self.get_stacks_epoch(5), } } fn get_pox_payout_addrs( @@ -8489,6 +8512,12 @@ pub mod test { fn get_v1_unlock_height(&self) -> u32 { 2 } + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } diff --git a/src/chainstate/stacks/mod.rs b/src/chainstate/stacks/mod.rs index babf7a046a..0e2ce3f956 100644 --- a/src/chainstate/stacks/mod.rs +++ b/src/chainstate/stacks/mod.rs @@ -83,7 +83,7 @@ pub use stacks_common::address::{ }; pub use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -pub const STACKS_BLOCK_VERSION: u8 = 4; +pub const STACKS_BLOCK_VERSION: u8 = 6; pub const STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE: u8 = 1; pub const MAX_BLOCK_LEN: u32 = 2 * 1024 * 1024; diff --git a/src/chainstate/stacks/tests/block_construction.rs b/src/chainstate/stacks/tests/block_construction.rs index 59c36cf534..3c423da117 100644 --- 
a/src/chainstate/stacks/tests/block_construction.rs +++ b/src/chainstate/stacks/tests/block_construction.rs @@ -30,6 +30,7 @@ use std::path::{Path, PathBuf}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::database::ClarityDatabase; use clarity::vm::test_util::TEST_BURN_STATE_DB; use clarity::vm::types::*; use rand::seq::SliceRandom; @@ -243,6 +244,7 @@ fn test_build_anchored_blocks_stx_transfers_single() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -377,6 +379,7 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -511,6 +514,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -536,6 +540,7 @@ fn test_build_anchored_blocks_stx_transfers_multi() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1225,6 +1230,7 @@ fn test_build_anchored_blocks_incrementing_nonces() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &tx, @@ -1425,6 +1431,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1446,6 +1453,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1466,6 +1474,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer, @@ -1612,6 +1621,7 @@ fn test_build_anchored_blocks_multiple_chaintips() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1766,6 +1776,7 @@ fn 
test_build_anchored_blocks_empty_chaintips() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_tx, @@ -1896,6 +1907,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, contract_tx_bytes, @@ -1923,6 +1935,7 @@ fn test_build_anchored_blocks_too_expensive_transactions() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, contract_tx_bytes, @@ -2257,6 +2270,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2285,6 +2299,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2321,6 +2336,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2349,6 +2365,7 @@ fn test_build_anchored_blocks_bad_nonces() { mempool .submit_raw( chainstate, + sortdb, &parent_tip_ch, &parent_header_hash, contract_tx_bytes, @@ -2594,6 +2611,7 @@ fn test_build_microblock_stream_forks() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -2920,6 +2938,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -2995,6 +3014,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -3027,6 +3047,7 @@ fn test_build_microblock_stream_forks_with_descendants() { mempool .submit_raw( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, tx_bytes, @@ -3984,6 +4005,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, 
&parent_consensus_hash, &parent_header_hash, tx, @@ -4010,6 +4032,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &contract_spends_too_much_tx, @@ -4058,6 +4081,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &spend_too_much, @@ -4108,6 +4132,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &runtime_checkerror_problematic, @@ -4156,6 +4181,7 @@ fn test_is_tx_problematic() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &runtime_checkerror_problematic, @@ -4208,6 +4234,241 @@ fn test_is_tx_problematic() { } } +#[test] +fn mempool_incorporate_pox_unlocks() { + let mut initial_balances = vec![]; + let total_balance = 10_000_000_000; + let pk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&pk)], + ) + .unwrap(); + initial_balances.push((addr.to_account_principal(), total_balance)); + let principal = PrincipalData::from(addr.clone()); + + let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); + peer_config.initial_balances = initial_balances; + peer_config.epochs = Some(vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + end_height: 36, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 36, + end_height: i64::MAX as u64, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ]); + 
peer_config.burnchain.pox_constants.v1_unlock_height = + peer_config.epochs.as_ref().unwrap()[1].end_height as u32 + 1; + let pox_constants = peer_config.burnchain.pox_constants.clone(); + + let mut peer = TestPeer::new(peer_config); + + let chainstate_path = peer.chainstate_path.clone(); + + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let first_block_height = peer.sortdb.as_ref().unwrap().first_block_height; + let first_pox_cycle = pox_constants + .block_height_to_reward_cycle(first_block_height, first_stacks_block_height) + .unwrap(); + let active_pox_cycle_start = + pox_constants.reward_cycle_to_block_height(first_block_height, first_pox_cycle + 1); + let lockup_end = pox_constants.v1_unlock_height as u64; + + // test for two PoX cycles + let num_blocks = 3 + lockup_end - first_stacks_block_height; + info!( + "Starting test"; + "num_blocks" => num_blocks, + "first_stacks_block_height" => first_stacks_block_height, + "active_pox_cycle_start" => active_pox_cycle_start, + "active_pox_cycle_end" => lockup_end, + "first_block_height" => first_block_height, + ); + + let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; + let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); + + let mut last_block = None; + for tenure_id in 0..num_blocks { + // send transactions to the mempool + let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) + .unwrap(); + + let (burn_ops, stacks_block, microblocks) = peer.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = match parent_opt { + None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), + Some(block) => { + let ic = sortdb.index_conn(); + let snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + 
&ic, + &tip.sortition_id, + &block.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_height = parent_tip.burn_header_height; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let coinbase_tx = make_coinbase(miner, tenure_id as usize); + + let mut mempool = + MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut expected_txids = vec![]; + expected_txids.push(coinbase_tx.txid()); + + // this will be the height of the block that includes this new tenure + let my_height = first_stacks_block_height + 1 + tenure_id; + + let available_balance = chainstate.with_read_only_clarity_tx(&sortdb.index_conn(), &parent_tip.index_block_hash(), |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|db| { + let burn_block_height = db.get_current_burnchain_block_height() as u64; + let v1_unlock_height = db.get_v1_unlock_height(); + let v2_unlock_height = db.get_v2_unlock_height(); + let balance = db.get_account_stx_balance(&principal); + info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height) + }) + }).unwrap(); + + if tenure_id <= 1 { + assert_eq!(available_balance, total_balance as u128, "Failed at tenure_id={}", tenure_id); + } else if my_height <= lockup_end + 1 { + assert_eq!(available_balance, 0, "Failed at tenure_id={}", tenure_id); + } else if my_height == lockup_end + 2 { + assert_eq!(available_balance, total_balance as u128 - 10_000, "Failed at tenure_id={}", tenure_id); + } else { + assert_eq!(available_balance, 0, "Failed at tenure_id={}", tenure_id); + } + + if tenure_id == 1 { + let stack_stx = 
make_user_contract_call( + &pk, + 0, + 10_000, + &StacksAddress::burn_address(false), + "pox", + "stack-stx", + vec![ + Value::UInt(total_balance as u128 - 10_000), + Value::Tuple( + TupleData::from_data(vec![ + ("version".into(), Value::buff_from(vec![0x00]).unwrap()), + ("hashbytes".into(), Value::buff_from(vec![0; 20]).unwrap()), + ]).unwrap(), + ), + Value::UInt(my_height as u128), + Value::UInt(10) + ], + ); + mempool + .submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &stack_stx, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch2_05, + ) + .unwrap(); + expected_txids.push(stack_stx.txid()); + } else if my_height == lockup_end + 2 { + let stx_transfer = make_user_stacks_transfer( + &pk, + 1, + 10_000, + &StacksAddress::burn_address(false).into(), + total_balance - 10_000 - 10_000, + ); + mempool + .submit( + chainstate, + sortdb, + &parent_consensus_hash, + &parent_header_hash, + &stx_transfer, + None, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch2_05, + ) + .unwrap(); + expected_txids.push(stx_transfer.txid()); + } + + let anchored_block = StacksBlockBuilder::build_anchored_block( + chainstate, + &sortdb.index_conn(), + &mut mempool, + &parent_tip, + tip.total_burn, + vrf_proof, + Hash160([tenure_id as u8; 20]), + &coinbase_tx, + BlockBuilderSettings::limited(), + None, + ) + .unwrap(); + + // make sure the right txs get included + let txids : Vec<_> = anchored_block.0.txs.iter().map(|tx| tx.txid()).collect(); + assert_eq!(txids, expected_txids); + + (anchored_block.0, vec![]) + }, + ); + + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + last_block = Some(StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + )); + } +} + #[test] /// Test the situation in which the nonce order of transactions from a user. That is, /// nonce 1 has a higher fee than nonce 0. 
@@ -4291,6 +4552,7 @@ fn test_fee_order_mismatch_nonce_order() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer0, @@ -4303,6 +4565,7 @@ fn test_fee_order_mismatch_nonce_order() { mempool .submit( chainstate, + sortdb, &parent_consensus_hash, &parent_header_hash, &stx_transfer1, diff --git a/src/clarity_vm/clarity.rs b/src/clarity_vm/clarity.rs index 067ec9fe17..c6272fe1f4 100644 --- a/src/clarity_vm/clarity.rs +++ b/src/clarity_vm/clarity.rs @@ -47,10 +47,12 @@ use stacks_common::util::secp256k1::MessageSignature; use crate::chainstate::stacks::boot::BOOT_CODE_COSTS_2_TESTNET; use crate::chainstate::stacks::boot::POX_2_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_2_TESTNET_CODE; +use crate::chainstate::stacks::boot::POX_3_MAINNET_CODE; +use crate::chainstate::stacks::boot::POX_3_TESTNET_CODE; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_NAME, + COSTS_3_NAME, POX_2_NAME, POX_3_NAME, }; use crate::chainstate::stacks::db::StacksAccount; use crate::chainstate::stacks::db::StacksChainState; @@ -68,8 +70,7 @@ use crate::chainstate::stacks::TransactionSpendingCondition; use crate::chainstate::stacks::TransactionVersion; use crate::chainstate::stacks::{SinglesigHashMode, SinglesigSpendingCondition, StacksTransaction}; use crate::core::StacksEpoch; -use crate::core::FIRST_STACKS_BLOCK_ID; -use crate::core::GENESIS_EPOCH; +use crate::core::{FIRST_STACKS_BLOCK_ID, GENESIS_EPOCH}; use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::BurnchainHeaderHash; use crate::types::chainstate::SortitionId; @@ -1081,6 +1082,200 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } + pub fn initialize_epoch_2_2(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches 
after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch22; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch22); + Ok(()) + }) + .unwrap(); + + // require 2.2 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch22; + }); + + debug!("Epoch 2.2 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + + pub fn initialize_epoch_2_3(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + + // first, upgrade the epoch + self.epoch = StacksEpochId::Epoch23; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch23); + Ok(()) + }) + .unwrap(); + + // require 2.3 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch23; + }); + + debug!("Epoch 2.3 initialized"); + + (old_cost_tracker, Ok(vec![])) + }) + } + + pub fn initialize_epoch_2_4(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. 
+ self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch24; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch24); + Ok(()) + }) + .unwrap(); + + // require 2.4 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch24; + }); + + /////////////////// .pox-3 //////////////////////// + let mainnet = self.mainnet; + let first_block_height = self.burn_state_db.get_burn_start_height(); + let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); + let pox_reward_cycle_length = self.burn_state_db.get_pox_reward_cycle_length(); + let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); + let pox_3_activation_height = self.burn_state_db.get_pox_3_activation_height(); + + let pox_3_first_cycle = PoxConstants::static_block_height_to_reward_cycle( + pox_3_activation_height as u64, + first_block_height as u64, + pox_reward_cycle_length as u64, + ) + .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height") + + 1; + + // get tx_version & boot code account information for pox-3 contract init + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let boot_code_address = boot_code_addr(mainnet); + + let boot_code_auth = TransactionAuth::Standard( + TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { + signer: boot_code_address.bytes.clone(), + hash_mode: SinglesigHashMode::P2PKH, + key_encoding: TransactionPublicKeyEncoding::Uncompressed, + nonce: 0, + tx_fee: 0, + signature: MessageSignature::empty(), + }), + ); + + let boot_code_nonce = self.with_clarity_db_readonly(|db| { + db.get_account_nonce(&boot_code_address.clone().into()) + }); + + let boot_code_account = StacksAccount { + principal: PrincipalData::Standard(boot_code_address.into()), + nonce: boot_code_nonce, + 
stx_balance: STXBalance::zero(), + }; + + let pox_3_code = if mainnet { + &*POX_3_MAINNET_CODE + } else { + &*POX_3_TESTNET_CODE + }; + + let pox_3_contract_id = boot_code_id(POX_3_NAME, mainnet); + + let payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from(POX_3_NAME) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(pox_3_code) + .expect("FATAL: invalid boot code body"), + }, + Some(ClarityVersion::Clarity2), + ); + + let pox_3_contract_tx = + StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + + let pox_3_initialization_receipt = self.as_transaction(|tx_conn| { + // initialize with a synthetic transaction + debug!("Instantiate {} contract", &pox_3_contract_id); + let receipt = StacksChainState::process_transaction_payload( + tx_conn, + &pox_3_contract_tx, + &boot_code_account, + ASTRules::PrecheckSize, + ) + .expect("FATAL: Failed to process PoX 3 contract initialization"); + + // set burnchain params + let consts_setter = PrincipalData::from(pox_3_contract_id.clone()); + let params = vec![ + Value::UInt(first_block_height as u128), + Value::UInt(pox_prepare_length as u128), + Value::UInt(pox_reward_cycle_length as u128), + Value::UInt(pox_rejection_fraction as u128), + Value::UInt(pox_3_first_cycle as u128), + ]; + + let (_, _, _burnchain_params_events) = tx_conn + .run_contract_call( + &consts_setter, + None, + &pox_3_contract_id, + "set-burnchain-parameters", + ¶ms, + |_, _| false, + ) + .expect("Failed to set burnchain parameters in PoX-3 contract"); + + receipt + }); + + if pox_3_initialization_receipt.result != Value::okay_true() + || pox_3_initialization_receipt.post_condition_aborted + { + panic!( + "FATAL: Failure processing PoX 3 contract initialization: {:#?}", + &pox_3_initialization_receipt + ); + } + + debug!("Epoch 2.4 initialized"); + + (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) + }) + } + pub fn 
start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; let cost_track = &mut self.cost_track; @@ -2244,8 +2439,17 @@ mod tests { ) -> Option { self.get_stacks_epoch(0) } + + fn get_v2_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_v1_unlock_height(&self) -> u32 { - u32::max_value() + u32::MAX + } + + fn get_pox_3_activation_height(&self) -> u32 { + u32::MAX } fn get_pox_prepare_length(&self) -> u32 { diff --git a/src/clarity_vm/database/marf.rs b/src/clarity_vm/database/marf.rs index a6cdaaa129..ac4e6f7212 100644 --- a/src/clarity_vm/database/marf.rs +++ b/src/clarity_vm/database/marf.rs @@ -1,4 +1,5 @@ use std::path::PathBuf; +use std::str::FromStr; use clarity::vm::analysis::AnalysisDatabase; use clarity::vm::database::SpecialCaseHandler; @@ -113,7 +114,7 @@ impl MarfedKV { use rand::Rng; use stacks_common::util::hash::to_hex; - let mut path = env::temp_dir(); + let mut path = PathBuf::from_str("/tmp/stacks-node-tests/unit-tests-marf").unwrap(); let random_bytes = rand::thread_rng().gen::<[u8; 32]>(); path.push(to_hex(&random_bytes)); diff --git a/src/clarity_vm/database/mod.rs b/src/clarity_vm/database/mod.rs index c3f5048608..dafec94ee3 100644 --- a/src/clarity_vm/database/mod.rs +++ b/src/clarity_vm/database/mod.rs @@ -454,6 +454,14 @@ impl BurnStateDB for SortitionHandleTx<'_> { self.context.pox_constants.v1_unlock_height } + fn get_v2_unlock_height(&self) -> u32 { + self.context.pox_constants.v2_unlock_height + } + + fn get_pox_3_activation_height(&self) -> u32 { + self.context.pox_constants.pox_3_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } @@ -532,7 +540,7 @@ impl BurnStateDB for SortitionDBConn<'_> { Some(height) => height, }; - if height >= current_height { + if height > current_height { return None; } @@ -565,6 +573,14 @@ impl BurnStateDB for SortitionDBConn<'_> { self.context.pox_constants.v1_unlock_height } + fn 
get_v2_unlock_height(&self) -> u32 { + self.context.pox_constants.v2_unlock_height + } + + fn get_pox_3_activation_height(&self) -> u32 { + self.context.pox_constants.pox_3_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } diff --git a/src/clarity_vm/special.rs b/src/clarity_vm/special.rs index 0eba05d4fe..eddb3dacae 100644 --- a/src/clarity_vm/special.rs +++ b/src/clarity_vm/special.rs @@ -17,7 +17,11 @@ use std::cmp; use std::convert::{TryFrom, TryInto}; -use clarity::vm::ast::ASTRules; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainstateError; +use crate::chainstate::stacks::StacksMicroblockHeader; +use crate::util_lib::boot::boot_code_id; use clarity::vm::clarity::Error as clarity_interpreter_error; use clarity::vm::contexts::{Environment, GlobalContext}; use clarity::vm::costs::cost_functions::ClarityCostFunction; @@ -37,17 +41,12 @@ use clarity::vm::{ast, eval_all}; use stacks_common::util::hash::Hash160; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::POX_1_NAME; -use crate::chainstate::stacks::boot::POX_2_NAME; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::Error as ChainstateError; -use crate::chainstate::stacks::StacksMicroblockHeader; use crate::core::StacksEpochId; -use crate::util_lib::boot::boot_code_id; +use crate::vm::ast::ASTRules; use crate::vm::costs::runtime_cost; /// Parse the returned value from PoX `stack-stx` and `delegate-stack-stx` functions -/// from pox-2.clar into a format more readily digestible in rust. +/// from pox-2.clar or pox-3.clar into a format more readily digestible in rust. 
/// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_stacking_result( result: &Value, @@ -118,7 +117,7 @@ fn parse_pox_stacking_result_v1( } } -/// Parse the returned value from PoX2 `stack-extend` and `delegate-stack-extend` functions +/// Parse the returned value from PoX2 or PoX3 `stack-extend` and `delegate-stack-extend` functions /// into a format more readily digestible in rust. /// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_extend_result(result: &Value) -> std::result::Result<(PrincipalData, u64), i128> { @@ -147,7 +146,7 @@ fn parse_pox_extend_result(result: &Value) -> std::result::Result<(PrincipalData } } -/// Parse the returned value from PoX2 `stack-increase` function +/// Parse the returned value from PoX2 or PoX3 `stack-increase` function /// into a format more readily digestible in rust. /// Panics if the supplied value doesn't match the expected tuple structure fn parse_pox_increase(result: &Value) -> std::result::Result<(PrincipalData, u128), i128> { @@ -563,10 +562,10 @@ fn create_event_info_data_code(function_name: &str, args: &[Value]) -> String { } } -/// Synthesize an events data tuple to return on the successful execution of a pox-2 stacking +/// Synthesize an events data tuple to return on the successful execution of a pox-2 or pox-3 stacking /// function. It runs a series of Clarity queries against the PoX contract's data space (including /// calling PoX functions). 
-fn synthesize_pox_2_event_info( +fn synthesize_pox_2_or_3_event_info( global_context: &mut GlobalContext, contract_id: &QualifiedContractIdentifier, sender_opt: Option<&PrincipalData>, @@ -606,7 +605,7 @@ fn synthesize_pox_2_event_info( let pox_2_contract = global_context .database .get_contract(contract_id) - .expect("FATAL: could not load PoX-2 contract metadata"); + .expect("FATAL: could not load PoX contract metadata"); let event_info = global_context .special_cc_handler_execute_read_only( @@ -655,12 +654,12 @@ fn synthesize_pox_2_event_info( }, ) .map_err(|e: ChainstateError| { - error!("Failed to synthesize PoX-2 event: {:?}", &e); + error!("Failed to synthesize PoX event: {:?}", &e); e })?; test_debug!( - "Synthesized PoX-2 event info for '{}''s call to '{}': {:?}", + "Synthesized PoX event info for '{}''s call to '{}': {:?}", sender, function_name, &event_info @@ -672,7 +671,7 @@ fn synthesize_pox_2_event_info( } /// Handle responses from stack-stx and delegate-stack-stx -- functions that *lock up* STX -fn handle_stack_lockup( +fn handle_stack_lockup_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -733,7 +732,7 @@ fn handle_stack_lockup( /// Handle responses from stack-extend and delegate-stack-extend -- functions that *extend /// already-locked* STX. -fn handle_stack_lockup_extension( +fn handle_stack_lockup_extension_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -793,9 +792,9 @@ fn handle_stack_lockup_extension( } } -/// Handle resposnes from stack-increase and delegate-stack-increase -- functions that *increase +/// Handle responses from stack-increase and delegate-stack-increase -- functions that *increase /// already-locked* STX amounts. -fn handle_stack_lockup_increase( +fn handle_stack_lockup_increase_pox_v2( global_context: &mut GlobalContext, function_name: &str, value: &Value, @@ -869,7 +868,7 @@ fn handle_pox_v2_api_contract_call( // for some reason. 
// Failure to synthesize an event due to a bug is *NOT* an excuse to crash the whole // network! Event capture is not consensus-critical. - let event_info_opt = match synthesize_pox_2_event_info( + let event_info_opt = match synthesize_pox_2_or_3_event_info( global_context, contract_id, sender_opt, @@ -901,11 +900,265 @@ fn handle_pox_v2_api_contract_call( // Execute function specific logic to complete the lock-up let lock_event_opt = if function_name == "stack-stx" || function_name == "delegate-stack-stx" { - handle_stack_lockup(global_context, function_name, value)? + handle_stack_lockup_pox_v2(global_context, function_name, value)? + } else if function_name == "stack-extend" || function_name == "delegate-stack-extend" { + handle_stack_lockup_extension_pox_v2(global_context, function_name, value)? + } else if function_name == "stack-increase" || function_name == "delegate-stack-increase" { + handle_stack_lockup_increase_pox_v2(global_context, function_name, value)? + } else { + None + }; + + // append the lockup event, so it looks as if the print event happened before the lock-up + if let Some(batch) = global_context.event_batches.last_mut() { + if let Some(print_event) = print_event_opt { + batch.events.push(print_event); + } + if let Some(lock_event) = lock_event_opt { + batch.events.push(lock_event); + } + } + + Ok(()) +} + +/////////////// PoX-3 ////////////////////////////////////////// + +/// Handle responses from stack-stx and delegate-stack-stx in pox-3 -- functions that *lock up* STX +fn handle_stack_lockup_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result<Option<StacksTransactionEvent>> { + debug!( + "Handle special-case contract-call to {:?} {} (which returned {:?})", + boot_code_id(POX_3_NAME, global_context.mainnet), + function_name, + value + ); + // applying a pox lock at this point is equivalent to evaluating a transfer + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + match 
parse_pox_stacking_result(value) { + Ok((stacker, locked_amount, unlock_height)) => { + match StacksChainState::pox_lock_v3( + &mut global_context.database, + &stacker, + locked_amount, + unlock_height as u64, + ) { + Ok(_) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount, + unlock_height, + locked_address: stacker, + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), + }, + )); + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); + } + Err(ChainstateError::PoxAlreadyLocked) => { + // the caller tried to lock tokens into multiple pox contracts + return Err(Error::Runtime(RuntimeErrorType::PoxAlreadyLocked, None)); + } + Err(e) => { + panic!( + "FATAL: failed to lock {} from {} until {}: '{:?}'", + locked_amount, stacker, unlock_height, &e + ); + } + } + } + Err(_) => { + // nothing to do -- the function failed + return Ok(None); + } + } +} + +/// Handle responses from stack-extend and delegate-stack-extend in pox-3 -- functions that *extend +/// already-locked* STX. +fn handle_stack_lockup_extension_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result<Option<StacksTransactionEvent>> { + // in this branch case, the PoX-3 contract has stored the extension information + // and performed the extension checks. 
Now, the VM needs to update the account locks + // (because the locks cannot be applied directly from the Clarity code itself) + // applying a pox lock at this point is equivalent to evaluating a transfer + debug!( + "Handle special-case contract-call to {:?} {} (which returned {:?})", + boot_code_id("pox-3", global_context.mainnet), + function_name, + value + ); + + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + if let Ok((stacker, unlock_height)) = parse_pox_extend_result(value) { + match StacksChainState::pox_lock_extend_v3( + &mut global_context.database, + &stacker, + unlock_height as u64, + ) { + Ok(locked_amount) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount, + unlock_height, + locked_address: stacker, + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), + }, + )); + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)) + } + Err(e) => { + // Error results *other* than a DefunctPoxContract panic, because + // those errors should have been caught by the PoX contract before + // getting to this code path. + panic!( + "FATAL: failed to extend lock from {} until {}: '{:?}'", + stacker, unlock_height, &e + ); + } + } + } else { + // The stack-extend function returned an error: we do not need to apply a lock + // in this case, and can just return and let the normal VM codepath surface the + // error response type. + return Ok(None); + } +} + +/// Handle responses from stack-increase and delegate-stack-increase in PoX-3 -- functions +/// that *increase already-locked* STX amounts. +fn handle_stack_lockup_increase_pox_v3( + global_context: &mut GlobalContext, + function_name: &str, + value: &Value, +) -> Result<Option<StacksTransactionEvent>> { + // in this branch case, the PoX-3 contract has stored the increase information + // and performed the increase checks. 
Now, the VM needs to update the account locks + // (because the locks cannot be applied directly from the Clarity code itself) + // applying a pox lock at this point is equivalent to evaluating a transfer + debug!( + "Handle special-case contract-call"; + "contract" => ?boot_code_id("pox-3", global_context.mainnet), + "function" => function_name, + "return-value" => %value, + ); + + runtime_cost( + ClarityCostFunction::StxTransfer, + &mut global_context.cost_track, + 1, + )?; + + if let Ok((stacker, total_locked)) = parse_pox_increase(value) { + match StacksChainState::pox_lock_increase_v3( + &mut global_context.database, + &stacker, + total_locked, + ) { + Ok(new_balance) => { + let event = StacksTransactionEvent::STXEvent(STXEventType::STXLockEvent( + STXLockEventData { + locked_amount: new_balance.amount_locked(), + unlock_height: new_balance.unlock_height(), + locked_address: stacker, + contract_identifier: boot_code_id(POX_3_NAME, global_context.mainnet), + }, + )); + + return Ok(Some(event)); + } + Err(ChainstateError::DefunctPoxContract) => { + return Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)) + } + Err(e) => { + // Error results *other* than a DefunctPoxContract panic, because + // those errors should have been caught by the PoX contract before + // getting to this code path. + panic!( + "FATAL: failed to increase lock from {}: '{:?}'", + stacker, &e + ); + } + } + } else { + Ok(None) + } +} + +/// Handle special cases when calling into the PoX-3 API contract +fn handle_pox_v3_api_contract_call( + global_context: &mut GlobalContext, + sender_opt: Option<&PrincipalData>, + contract_id: &QualifiedContractIdentifier, + function_name: &str, + args: &[Value], + value: &Value, +) -> Result<()> { + // Generate a synthetic print event for all functions that alter stacking state + let print_event_opt = if let Value::Response(response) = value { + if response.committed { + // method succeeded. 
Synthesize event info, but default to no event report if we fail + // for some reason. + // Failure to synthesize an event due to a bug is *NOT* an excuse to crash the whole + // network! Event capture is not consensus-critical. + let event_info_opt = match synthesize_pox_2_or_3_event_info( + global_context, + contract_id, + sender_opt, + function_name, + args, + ) { + Ok(Some(event_info)) => Some(event_info), + Ok(None) => None, + Err(e) => { + error!("Failed to synthesize PoX-3 event info: {:?}", &e); + None + } + }; + if let Some(event_info) = event_info_opt { + let event_response = + Value::okay(event_info).expect("FATAL: failed to construct (ok event-info)"); + let tx_event = + Environment::construct_print_transaction_event(contract_id, &event_response); + Some(tx_event) + } else { + None + } + } else { + None + } + } else { + None + }; + + // Execute function specific logic to complete the lock-up + let lock_event_opt = if function_name == "stack-stx" || function_name == "delegate-stack-stx" { + handle_stack_lockup_pox_v3(global_context, function_name, value)? } else if function_name == "stack-extend" || function_name == "delegate-stack-extend" { - handle_stack_lockup_extension(global_context, function_name, value)? + handle_stack_lockup_extension_pox_v3(global_context, function_name, value)? } else if function_name == "stack-increase" || function_name == "delegate-stack-increase" { - handle_stack_lockup_increase(global_context, function_name, value)? + handle_stack_lockup_increase_pox_v3(global_context, function_name, value)? 
} else { None }; @@ -938,6 +1191,32 @@ fn is_pox_v1_read_only(func_name: &str) -> bool { || func_name == "get-pox-info" } +fn is_pox_v2_read_only(func_name: &str) -> bool { + "get-pox-rejection" == func_name + || "is-pox-active" == func_name + || "burn-height-to-reward-cycle" == func_name + || "reward-cycle-to-burn-height" == func_name + || "current-pox-reward-cycle" == func_name + || "get-stacker-info" == func_name + || "get-check-delegation" == func_name + || "get-reward-set-size" == func_name + || "next-cycle-rejection-votes" == func_name + || "get-total-ustx-stacked" == func_name + || "get-reward-set-pox-address" == func_name + || "get-stacking-minimum" == func_name + || "check-pox-addr-version" == func_name + || "check-pox-addr-hashbytes" == func_name + || "check-pox-lock-period" == func_name + || "can-stack-stx" == func_name + || "minimal-can-stack-stx" == func_name + || "get-pox-info" == func_name + || "get-delegation-info" == func_name + || "get-allowance-contract-callers" == func_name + || "get-num-reward-set-pox-addresses" == func_name + || "get-partial-stacked-by-cycle" == func_name + || "get-total-pox-rejection" == func_name +} + /// Handle special cases of contract-calls -- namely, those into PoX that should lock up STX pub fn handle_contract_call_special_cases( global_context: &mut GlobalContext, @@ -964,6 +1243,17 @@ pub fn handle_contract_call_special_cases( } return handle_pox_v1_api_contract_call(global_context, sender, function_name, result); } else if *contract_id == boot_code_id(POX_2_NAME, global_context.mainnet) { + if !is_pox_v2_read_only(function_name) && global_context.epoch_id >= StacksEpochId::Epoch22 + { + warn!("PoX-2 function call attempted on an account after Epoch 2.2"; + "v2_unlock_ht" => global_context.database.get_v2_unlock_height(), + "current_burn_ht" => global_context.database.get_current_burnchain_block_height(), + "function_name" => function_name, + "contract_id" => %contract_id + ); + return 
Err(Error::Runtime(RuntimeErrorType::DefunctPoxContract, None)); + } + return handle_pox_v2_api_contract_call( global_context, sender, @@ -972,6 +1262,15 @@ pub fn handle_contract_call_special_cases( args, result, ); + } else if *contract_id == boot_code_id(POX_3_NAME, global_context.mainnet) { + return handle_pox_v3_api_contract_call( + global_context, + sender, + contract_id, + function_name, + args, + result, + ); } // TODO: insert more special cases here, as needed diff --git a/src/clarity_vm/tests/analysis_costs.rs b/src/clarity_vm/tests/analysis_costs.rs index bc0f07a039..f9ffeef1c6 100644 --- a/src/clarity_vm/tests/analysis_costs.rs +++ b/src/clarity_vm/tests/analysis_costs.rs @@ -27,9 +27,7 @@ use clarity::vm::functions::NativeFunctions; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; use clarity::vm::tests::test_only_mainnet_to_chain_id; -use clarity::vm::tests::{ - execute, symbols_from_values, with_memory_environment, UnitTestBurnStateDB, -}; +use clarity::vm::tests::{execute, symbols_from_values, UnitTestBurnStateDB}; use clarity::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, ResponseData, Value, }; diff --git a/src/clarity_vm/tests/ast.rs b/src/clarity_vm/tests/ast.rs index 3758f05028..64adebd271 100644 --- a/src/clarity_vm/tests/ast.rs +++ b/src/clarity_vm/tests/ast.rs @@ -1,5 +1,6 @@ use clarity::vm::ast::build_ast; use clarity::vm::test_util::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; +use clarity::vm::tests::test_clarity_versions; use clarity::vm::types::QualifiedContractIdentifier; use clarity::vm::version::ClarityVersion; #[cfg(test)] @@ -13,17 +14,6 @@ use stacks_common::types::StacksEpochId; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity_vm::{clarity::ClarityInstance, database::marf::MarfedKV}; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, 
StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_edge_counting_runtime_template( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - fn dependency_edge_counting_runtime( iters: usize, version: ClarityVersion, @@ -71,7 +61,7 @@ fn dependency_edge_counting_runtime( cost_track.get_total().runtime } -#[apply(test_edge_counting_runtime_template)] +#[apply(test_clarity_versions)] fn test_edge_counting_runtime(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { let ratio_4_8 = dependency_edge_counting_runtime(8, version, epoch) / dependency_edge_counting_runtime(4, version, epoch); diff --git a/src/clarity_vm/tests/contracts.rs b/src/clarity_vm/tests/contracts.rs index 7976b6c8a1..6cebb22c1d 100644 --- a/src/clarity_vm/tests/contracts.rs +++ b/src/clarity_vm/tests/contracts.rs @@ -28,8 +28,8 @@ use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use clarity::vm::execute as vm_execute; use clarity::vm::representations::SymbolicExpression; use clarity::vm::tests::{ - execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, - with_memory_environment, BurnStateDB, TEST_BURN_STATE_DB, TEST_HEADER_DB, + execute, is_committed, is_err_code_i128 as is_err_code, symbols_from_values, BurnStateDB, + TEST_BURN_STATE_DB, TEST_HEADER_DB, }; use clarity::vm::types::BuffData; use clarity::vm::types::SequenceData::Buffer; @@ -393,10 +393,11 @@ fn trait_invocation_205_with_stored_principal() { } /// Publish a trait in epoch 2.05 and then invoke it in epoch 2.1. +/// Test the behaviors in 2.2 and 2.3 as well. #[test] fn trait_invocation_cross_epoch() { let mut sim = ClarityTestSim::new(); - sim.epoch_bounds = vec![0, 3, 5]; + sim.epoch_bounds = vec![0, 3, 5, 7, 9]; // Advance two blocks so we get to Stacks 2.05. 
sim.execute_next_block(|_env| {}); @@ -422,6 +423,7 @@ fn trait_invocation_cross_epoch() { let sender = StacksAddress::burn_address(false).into(); + info!("Sim height = {}", sim.height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); let clarity_version = ClarityVersion::default_for_epoch(epoch); @@ -430,6 +432,7 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &use_contract_id, use_contract, clarity_version).unwrap(); }); // Advance another block so we get to Stacks 2.1. This is the last block in 2.05 + info!("Sim height = {}", sim.height); sim.execute_next_block(|_| {}); // now in Stacks 2.1 sim.execute_next_block_as_conn(|conn| { @@ -439,6 +442,72 @@ fn trait_invocation_cross_epoch() { publish_contract(conn, &invoke_contract_id, invoke_contract, clarity_version).unwrap(); }); + info!("Sim height = {}", sim.height); + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-1", + &[], + |_, _| false, + ) + .unwrap(); + }); + }); + + info!("Sim height = {}", sim.height); + // now in Stacks 2.2 + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + let error = clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-1", + &[], + |_, _| false, + ) + .unwrap_err(); + + if let ClarityError::Interpreter(Error::Unchecked(CheckErrors::TypeValueError(TypeSignature::TraitReferenceType(_), value))) = error { + // pass + } else { + panic!("Expected an Interpreter(UncheckedError(TypeValue(TraitReferenceType, Principal))) during Epoch-2.2"); + }; + }); + }); + + info!("Sim height = {}", sim.height); + sim.execute_next_block_as_conn(|conn| { + let epoch = conn.get_epoch(); + conn.as_transaction(|clarity_db| { + let error = clarity_db + .run_contract_call( + &sender, + None, + &invoke_contract_id, + "invocation-2", 
+ &[Value::Principal(impl_contract_id.clone().into())], + |_, _| false, + ) + .unwrap_err(); + + if let ClarityError::Interpreter(Error::Unchecked(CheckErrors::TypeValueError(TypeSignature::TraitReferenceType(_), value))) = error { + // pass + } else { + panic!("Expected an Interpreter(UncheckedError(TypeValue(TraitReferenceType, Principal))) during Epoch-2.2"); + }; + }); + }); + + // should now be in Stacks 2.3, so the invocation should work again! + info!("Sim height = {}", sim.height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -455,6 +524,7 @@ fn trait_invocation_cross_epoch() { }); }); + info!("Sim height = {}", sim.height); sim.execute_next_block_as_conn(|conn| { let epoch = conn.get_epoch(); conn.as_transaction(|clarity_db| { @@ -464,7 +534,7 @@ fn trait_invocation_cross_epoch() { None, &invoke_contract_id, "invocation-2", - &[Value::Principal(impl_contract_id.into())], + &[Value::Principal(impl_contract_id.clone().into())], |_, _| false, ) .unwrap(); diff --git a/src/clarity_vm/tests/costs.rs b/src/clarity_vm/tests/costs.rs index 4b6a53fe9d..fe1f89403b 100644 --- a/src/clarity_vm/tests/costs.rs +++ b/src/clarity_vm/tests/costs.rs @@ -1191,11 +1191,13 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity confirmed-height: u1 }}", intercepted, "\"intercepted-function\"", cost_definer, "\"cost-definition\"" ); + let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( voting_contract_to_use, "confirmed-proposals", execute_on_network("{ confirmed-id: u0 }", use_mainnet), execute_on_network(&value, use_mainnet), + &epoch, ) .unwrap(); db.commit(); @@ -1481,6 +1483,12 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi let bad_proposals = bad_cases.len(); + let voting_contract_to_use: &QualifiedContractIdentifier = if use_mainnet { + &COST_VOTING_MAINNET_CONTRACT + } else { + &COST_VOTING_TESTNET_CONTRACT + }; + { let 
mut store = marf_kv.begin(&StacksBlockId([1 as u8; 32]), &StacksBlockId([2 as u8; 32])); @@ -1488,7 +1496,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi db.begin(); db.set_variable_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposal-count", Value::UInt(bad_proposals as u128), ) @@ -1505,11 +1513,13 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); + let epoch = db.get_clarity_epoch_version(); db.set_entry_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix)), execute(&value), + &epoch, ) .unwrap(); } @@ -1543,7 +1553,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi for (target, referenced_function) in tracker.cost_function_references().into_iter() { assert_eq!( &referenced_function.contract_id, - &boot_code_id("costs", false), + &boot_code_id("costs", use_mainnet), "All cost functions should still point to the boot costs" ); assert_eq!( @@ -1565,7 +1575,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi "cost-definition", ), ( - boot_code_id("costs", false), + boot_code_id("costs", use_mainnet), "cost_le", cost_definer.clone(), "cost-definition-le", @@ -1586,7 +1596,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi let good_proposals = good_cases.len() as u128; db.set_variable_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposal-count", Value::UInt(bad_proposals as u128 + good_proposals), ) @@ -1603,11 +1613,13 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi confirmed-height: u1 }}", intercepted_ct, intercepted_f, cost_ct, cost_f ); + let epoch = db.get_clarity_epoch_version(); 
db.set_entry_unknown_descriptor( - &COST_VOTING_TESTNET_CONTRACT, + voting_contract_to_use, "confirmed-proposals", execute(&format!("{{ confirmed-id: u{} }}", ix + bad_proposals)), execute(&value), + &epoch, ) .unwrap(); } @@ -1660,7 +1672,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi } else { assert_eq!( &referenced_function.contract_id, - &boot_code_id("costs", false), + &boot_code_id("costs", use_mainnet), "Cost function should still point to the boot costs" ); assert_eq!( @@ -1674,11 +1686,11 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi }; } -// TODO: Reinstate this test. We couldn't get it working in time for pr/2940. -//#[test] -//fn test_cost_voting_integration_mainnet() { -// test_cost_voting_integration(true) -//} +#[test] +fn test_cost_voting_integration_mainnet() { + test_cost_voting_integration(true, ClarityVersion::Clarity1); + test_cost_voting_integration(true, ClarityVersion::Clarity2); +} #[test] fn test_cost_voting_integration_testnet() { diff --git a/src/clarity_vm/tests/forking.rs b/src/clarity_vm/tests/forking.rs index 284082ef95..45e349f8a8 100644 --- a/src/clarity_vm/tests/forking.rs +++ b/src/clarity_vm/tests/forking.rs @@ -34,21 +34,11 @@ use stacks_common::types::chainstate::StacksBlockId; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity_vm::database::marf::MarfedKV; +use clarity::vm::tests::test_clarity_versions; const p1_str: &str = "'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR"; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn test_clarity_versions_type_checker( - #[case] version: ClarityVersion, - #[case] epoch: StacksEpochId, -) { -} - -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn 
test_forking_simple(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { with_separate_forks_environment( version, @@ -66,7 +56,7 @@ fn test_forking_simple(#[case] version: ClarityVersion, #[case] epoch: StacksEpo ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { // test how at-block works when a mutation has occurred fn initialize(owned_env: &mut OwnedEnvironment) { @@ -143,7 +133,7 @@ fn test_at_block_mutations(#[case] version: ClarityVersion, #[case] epoch: Stack ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { fn initialize(owned_env: &mut OwnedEnvironment) { let c = QualifiedContractIdentifier::local("contract").unwrap(); @@ -222,7 +212,7 @@ fn test_at_block_good(#[case] version: ClarityVersion, #[case] epoch: StacksEpoc ); } -#[apply(test_clarity_versions_type_checker)] +#[apply(test_clarity_versions)] fn test_at_block_missing_defines(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { fn initialize_1(owned_env: &mut OwnedEnvironment) { let c_a = QualifiedContractIdentifier::local("contract-a").unwrap(); diff --git a/src/clarity_vm/tests/large_contract.rs b/src/clarity_vm/tests/large_contract.rs index e6aa40aee1..2b2ca1694c 100644 --- a/src/clarity_vm/tests/large_contract.rs +++ b/src/clarity_vm/tests/large_contract.rs @@ -26,6 +26,8 @@ use clarity::vm::errors::Error as InterpreterError; use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; use clarity::vm::representations::SymbolicExpression; use clarity::vm::test_util::*; + +use clarity::vm::tests::test_clarity_versions; use clarity::vm::types::{ OptionalData, PrincipalData, QualifiedContractIdentifier, ResponseData, StandardPrincipalData, TypeSignature, Value, @@ -33,6 +35,7 @@ use clarity::vm::types::{ use 
clarity::vm::version::ClarityVersion; use clarity::vm::ContractContext; use clarity::vm::MAX_CALL_STACK_DEPTH; + #[cfg(test)] use rstest::rstest; #[cfg(test)] @@ -46,17 +49,9 @@ use crate::chainstate::stacks::index::ClarityMarfTrieId; use crate::clarity_vm::clarity::{ClarityInstance, Error as ClarityError}; use crate::clarity_vm::database::marf::MarfedKV; use crate::clarity_vm::database::MemoryBackingStore; -use crate::types::chainstate::BlockHeaderHash; use crate::types::chainstate::StacksBlockId; -use crate::util_lib::boot::boot_code_id; -use crate::vm::tests::with_memory_environment; -#[template] -#[rstest] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch2_05)] -#[case(ClarityVersion::Clarity1, StacksEpochId::Epoch21)] -#[case(ClarityVersion::Clarity2, StacksEpochId::Epoch21)] -fn clarity_version_template(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) {} +use crate::util_lib::boot::boot_code_id; fn test_block_headers(n: u8) -> StacksBlockId { StacksBlockId([n as u8; 32]) @@ -99,8 +94,11 @@ const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance (token-credit! 'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G u200) (token-credit! 
.tokens u4))"; -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { + if epoch < StacksEpochId::Epoch2_05 { + return; + } let mut clarity = ClarityInstance::new(false, CHAIN_ID_TESTNET, MarfedKV::temporary()); let p1 = PrincipalData::from( PrincipalData::parse_standard_principal("SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR") @@ -111,12 +109,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac .unwrap(), ); let contract_identifier = QualifiedContractIdentifier::local("tokens").unwrap(); - let burn_db = match epoch { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch), - }; + let burn_db = &generate_test_burn_state_db(epoch); let mut gb = clarity.begin_test_genesis_block( &StacksBlockId::sentinel(), @@ -132,44 +125,49 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac }) .unwrap(); - if epoch == StacksEpochId::Epoch2_05 { - let (ast, _analysis) = tx - .analyze_smart_contract( + match epoch { + StacksEpochId::Epoch2_05 => { + let (ast, _analysis) = tx + .analyze_smart_contract( + &boot_code_id("costs-2", false), + ClarityVersion::Clarity1, + BOOT_CODE_COSTS_2, + ASTRules::PrecheckSize, + ) + .unwrap(); + tx.initialize_smart_contract( &boot_code_id("costs-2", false), ClarityVersion::Clarity1, + &ast, BOOT_CODE_COSTS_2, - ASTRules::PrecheckSize, + None, + |_, _| false, ) .unwrap(); - tx.initialize_smart_contract( - &boot_code_id("costs-2", false), - ClarityVersion::Clarity1, - &ast, - BOOT_CODE_COSTS_2, - None, - |_, _| false, - ) - .unwrap(); - } - - if epoch == StacksEpochId::Epoch21 { - let (ast, _analysis) = tx - .analyze_smart_contract( + } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 => 
{ + let (ast, _analysis) = tx + .analyze_smart_contract( + &boot_code_id("costs-3", false), + ClarityVersion::Clarity2, + BOOT_CODE_COSTS_3, + ASTRules::PrecheckSize, + ) + .unwrap(); + tx.initialize_smart_contract( &boot_code_id("costs-3", false), ClarityVersion::Clarity2, + &ast, BOOT_CODE_COSTS_3, - ASTRules::PrecheckSize, + None, + |_, _| false, ) .unwrap(); - tx.initialize_smart_contract( - &boot_code_id("costs-3", false), - ClarityVersion::Clarity2, - &ast, - BOOT_CODE_COSTS_3, - None, - |_, _| false, - ) - .unwrap(); + } + _ => panic!("Epoch {} not covered.", &epoch), } }); @@ -427,7 +425,7 @@ where f(&mut owned_env, version) } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] fn test_simple_naming_system(#[case] version: ClarityVersion, #[case] epoch: StacksEpochId) { with_versioned_memory_environment(inner_test_simple_naming_system, version, false); } @@ -661,7 +659,7 @@ fn inner_test_simple_naming_system(owned_env: &mut OwnedEnvironment, version: Cl * `(define-data-var var-x ...)` uses more than 1048576 bytes of memory. * this is mainly due to using hex encoding in the sqlite storage. 
*/ -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn rollback_log_memory_test( #[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId, @@ -669,12 +667,7 @@ pub fn rollback_log_memory_test( let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let EXPLODE_N = 100; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); clarity_instance @@ -738,17 +731,12 @@ pub fn rollback_log_memory_test( } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let EXPLODE_N = 100; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); @@ -818,7 +806,7 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn argument_memory_test( #[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId, @@ -828,12 +816,7 @@ pub fn argument_memory_test( let EXPLODE_N = 100; let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => 
&TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); clarity_instance .begin_test_genesis_block( @@ -901,18 +884,13 @@ pub fn argument_memory_test( } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let COUNT_PER_FUNC = 10; let FUNCS = 10; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap(); @@ -1026,18 +1004,13 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ } } -#[apply(clarity_version_template)] +#[apply(test_clarity_versions)] pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id: StacksEpochId) { let marf = MarfedKV::temporary(); let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); let COUNT_PER_CONTRACT = 20; let CONTRACTS = 5; - let burn_db = match epoch_id { - StacksEpochId::Epoch20 => &TEST_BURN_STATE_DB, - StacksEpochId::Epoch2_05 => &TEST_BURN_STATE_DB_205, - StacksEpochId::Epoch21 => &TEST_BURN_STATE_DB_21, - _ => panic!("Epoch {} not covered", &epoch_id), - }; + let burn_db = &generate_test_burn_state_db(epoch_id); clarity_instance .begin_test_genesis_block( diff --git a/src/core/mempool.rs b/src/core/mempool.rs index c6706b00cf..5394675ff5 100644 --- a/src/core/mempool.rs +++ b/src/core/mempool.rs @@ -43,6 +43,7 @@ use 
stacks_common::util::hash::to_hex; use stacks_common::util::hash::Sha512Trunc256Sum; use crate::burnchains::Txid; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::miner::TransactionEvent; @@ -169,10 +170,17 @@ impl MemPoolAdmitter { pub fn will_admit_tx( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, tx: &StacksTransaction, tx_size: u64, ) -> Result<(), MemPoolRejection> { - chainstate.will_admit_mempool_tx(&self.cur_consensus_hash, &self.cur_block, tx, tx_size) + chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), + &self.cur_consensus_hash, + &self.cur_block, + tx, + tx_size, + ) } } @@ -1968,6 +1976,7 @@ impl MemPoolDB { fn tx_submit( mempool_tx: &mut MemPoolTx, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2022,7 +2031,9 @@ impl MemPoolDB { mempool_tx .admitter .set_block(&block_hash, (*consensus_hash).clone()); - mempool_tx.admitter.will_admit_tx(chainstate, tx, len)?; + mempool_tx + .admitter + .will_admit_tx(chainstate, sortdb, tx, len)?; } MemPoolDB::try_add_tx( @@ -2059,6 +2070,7 @@ impl MemPoolDB { pub fn submit( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2096,6 +2108,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, tx, @@ -2111,6 +2124,7 @@ impl MemPoolDB { pub fn miner_submit( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx: &StacksTransaction, @@ -2124,6 +2138,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, tx, @@ -2141,6 +2156,7 @@ impl MemPoolDB { pub fn 
submit_raw( &mut self, chainstate: &mut StacksChainState, + sortdb: &SortitionDB, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, tx_bytes: Vec, @@ -2182,6 +2198,7 @@ impl MemPoolDB { MemPoolDB::tx_submit( &mut mempool_tx, chainstate, + sortdb, consensus_hash, block_hash, &tx, diff --git a/src/core/mod.rs b/src/core/mod.rs index 54cb085a89..d77d395550 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -50,15 +50,24 @@ pub use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, STACKS_EPOCH // first byte == major network protocol version (currently 0x18) // second and third bytes are unused // fourth byte == highest epoch supported by this node -// - 0x05 for 2.05 -// - 0x06 for 2.1 -pub const PEER_VERSION_MAINNET: u32 = 0x18000006; -pub const PEER_VERSION_TESTNET: u32 = 0xfacade06; +pub const PEER_VERSION_MAINNET_MAJOR: u32 = 0x18000000; +pub const PEER_VERSION_TESTNET_MAJOR: u32 = 0xfacade00; pub const PEER_VERSION_EPOCH_1_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_0: u8 = 0x00; pub const PEER_VERSION_EPOCH_2_05: u8 = 0x05; pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; +pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; +pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; +pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; + +// this should be updated to the latest network epoch version supported by +// this node. this will be checked by the `validate_epochs()` method. 
+pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_4 as u32; + +// set the fourth byte of the peer version +pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; +pub const PEER_VERSION_TESTNET: u32 = PEER_VERSION_TESTNET_MAJOR | PEER_NETWORK_EPOCH; // network identifiers pub const NETWORK_ID_MAINNET: u32 = 0x17000000; @@ -106,18 +115,23 @@ pub const BITCOIN_MAINNET_FIRST_BLOCK_HASH: &str = "0000000000000000000ab248c8e35c574514d052a83dbc12669e19bc43df486e"; pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; pub const BITCOIN_MAINNET_STACKS_2_05_BURN_HEIGHT: u64 = 713_000; - -// TODO: Pick a real height for Stacks 2.1. -pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 2_000_000; - -pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2_412_530; -pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1671825973; +pub const BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT: u64 = 781_551; +/// This is Epoch-2.2 activation height proposed in SIP-022 +pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; +/// This is Epoch-2.3 activation height proposed in SIP-023 +pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; +/// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-024 +pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; + +pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; +pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = - "000000000000002a57f75a9bf78dde774da64899ff85ded8a9075f6b4959c959"; -pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_412_531; - -// TODO: Pick a real height for Stacks 2.1. 
-pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_412_532; + "000000000000010dd0863ec3d7a0bae17c1957ae1de9cbcdae8e77aad33e3b8c"; +pub const BITCOIN_TESTNET_STACKS_2_05_BURN_HEIGHT: u64 = 2_104_380; +pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; +pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; +pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; +pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -157,12 +171,16 @@ pub const POX_MAX_NUM_CYCLES: u8 = 12; pub const POX_TESTNET_STACKING_THRESHOLD_25: u128 = 8000; pub const POX_TESTNET_CYCLE_LENGTH: u128 = 1050; -// TODO: pick a real value for Stacks 2.1 pub const POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT as u32) + 1; pub const POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT as u32) + 1; +pub const POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT as u32) + 1; +pub const POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT as u32) + 1; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 @@ -227,7 +245,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -252,15 +270,36 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: BITCOIN_MAINNET_STACKS_21_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT, + end_height: BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT, + end_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -285,15 +324,36 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT, + end_height: BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT, + end_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 4] = [ + pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 7] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -318,10 +378,31 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: 2000, - end_height: STACKS_EPOCH_MAX, + end_height: 3000, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_1 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 3000, + end_height: 4000, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_2 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 4000, + end_height: 5000, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_3 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 5000, + end_height: STACKS_EPOCH_MAX, + block_limit: HELIUM_BLOCK_LIMIT_20.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4 + }, ]; } @@ -333,6 +414,18 @@ pub static STACKS_EPOCH_2_05_MARKER: u8 = 0x05; /// *or greater*. pub static STACKS_EPOCH_2_1_MARKER: u8 = 0x06; +/// Stacks 2.2 epoch marker. All block-commits in 2.2 must have a memo bitfield with this value +/// *or greater*. +pub static STACKS_EPOCH_2_2_MARKER: u8 = 0x07; + +/// Stacks 2.3 epoch marker. All block-commits in 2.3 must have a memo bitfield with this value +/// *or greater*. +pub static STACKS_EPOCH_2_3_MARKER: u8 = 0x08; + +/// Stacks 2.4 epoch marker. All block-commits in 2.4 must have a memo bitfield with this value +/// *or greater*. 
+pub static STACKS_EPOCH_2_4_MARKER: u8 = 0x09; + #[test] fn test_ord_for_stacks_epoch() { let epochs = STACKS_EPOCHS_MAINNET.clone(); @@ -342,9 +435,18 @@ fn test_ord_for_stacks_epoch() { assert_eq!(epochs[0].cmp(&epochs[0]), Ordering::Equal); assert_eq!(epochs[1].cmp(&epochs[1]), Ordering::Equal); assert_eq!(epochs[2].cmp(&epochs[2]), Ordering::Equal); + assert_eq!(epochs[3].cmp(&epochs[3]), Ordering::Equal); + assert_eq!(epochs[4].cmp(&epochs[4]), Ordering::Equal); assert_eq!(epochs[2].cmp(&epochs[0]), Ordering::Greater); assert_eq!(epochs[2].cmp(&epochs[1]), Ordering::Greater); assert_eq!(epochs[1].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[3].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[4].cmp(&epochs[3]), Ordering::Greater); } #[test] @@ -398,6 +500,12 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, @@ -560,6 +668,237 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_2_2(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: 
StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + ] + } + + #[cfg(test)] + fn unit_test_2_3(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_3 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 
205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 220220, + write_count: 220220, + read_length: 220220, + read_count: 220220, + runtime: 220220, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 230230, + write_count: 230230, + read_length: 230230, + read_count: 230230, + runtime: 230230, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + ] + } + + #[cfg(test)] + fn unit_test_2_4(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_4 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, 
+ }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -619,6 +958,9 @@ impl StacksEpochExtension for StacksEpoch { } StacksEpochId::Epoch2_05 => StacksEpoch::unit_test_2_05(first_burnchain_height), StacksEpochId::Epoch21 => StacksEpoch::unit_test_2_1(first_burnchain_height), + StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), + StacksEpochId::Epoch23 => StacksEpoch::unit_test_2_3(first_burnchain_height), + StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), } } @@ -669,6 +1011,20 @@ impl StacksEpochExtension for StacksEpoch { 
let mut seen_epochs = HashSet::new(); epochs.sort(); + let max_epoch = epochs_ref + .iter() + .max() + .expect("FATAL: expect at least one epoch"); + assert!( + max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH, + "stacks-blockchain static network epoch should be greater than or equal to the max epoch's" + ); + + assert!( + StacksEpochId::latest() >= max_epoch.epoch_id, + "StacksEpochId::latest() should be greater than or equal to any epoch defined in the node" + ); + let mut epoch_end_height = 0; for epoch in epochs.iter() { assert!( diff --git a/src/cost_estimates/pessimistic.rs b/src/cost_estimates/pessimistic.rs index 84533f783a..c93bc3e307 100644 --- a/src/cost_estimates/pessimistic.rs +++ b/src/cost_estimates/pessimistic.rs @@ -228,6 +228,12 @@ impl PessimisticEstimator { StacksEpochId::Epoch20 => "", StacksEpochId::Epoch2_05 => ":2.05", StacksEpochId::Epoch21 => ":2.1", + // reuse cost estimates in Epoch22 + StacksEpochId::Epoch22 => ":2.1", + // reuse cost estimates in Epoch23 + StacksEpochId::Epoch23 => ":2.1", + // reuse cost estimates in Epoch24 + StacksEpochId::Epoch24 => ":2.1", }; format!( "cc{}:{}:{}.{}", diff --git a/src/main.rs b/src/main.rs index 57fd7e7fd7..ab627edb58 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1000,6 +1000,59 @@ simulating a miner. 
return; } + if argv[1] == "deserialize-db" { + if argv.len() < 4 { + eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]); + process::exit(1); + } + let db_path = &argv[2]; + let byte_prefix = &argv[3]; + let conn = Connection::open_with_flags(db_path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); + let query = format!( + "SELECT value FROM data_table WHERE key LIKE \"{}%\"", + byte_prefix + ); + let mut stmt = conn.prepare(&query).unwrap(); + let mut rows = stmt.query(rusqlite::NO_PARAMS).unwrap(); + while let Ok(Some(row)) = rows.next() { + let val_string: String = row.get(0).unwrap(); + let clarity_value = match clarity::vm::Value::try_deserialize_hex_untyped(&val_string) { + Ok(x) => x, + Err(_e) => continue, + }; + println!("{} => {}", val_string, clarity_value); + } + + process::exit(0); + } + + if argv[1] == "check-deser-data" { + if argv.len() < 3 { + eprintln!("Usage: {} check-file.txt", &argv[0]); + process::exit(1); + } + let txt_path = &argv[2]; + let check_file = File::open(txt_path).unwrap(); + let mut i = 1; + for line in io::BufReader::new(check_file).lines() { + if i % 100000 == 0 { + println!("{}...", i); + } + i += 1; + let line = line.unwrap().trim().to_string(); + if line.len() == 0 { + continue; + } + let vals: Vec<_> = line.split(" => ").map(|x| x.trim()).collect(); + let hex_string = &vals[0]; + let expected_value_display = &vals[1]; + let value = clarity::vm::Value::try_deserialize_hex_untyped(&hex_string).unwrap(); + assert_eq!(&value.to_string(), expected_value_display); + } + + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); @@ -1428,6 +1481,7 @@ simulating a miner. 
} let result = mempool_db.submit( &mut chain_state, + &sort_db, &stacks_block.consensus_hash, &stacks_block.anchored_block_hash, &raw_tx, diff --git a/src/net/chat.rs b/src/net/chat.rs index ecf5ce48d0..26ac70eaa6 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -41,6 +41,8 @@ use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::StacksPublicKey; use crate::core::StacksEpoch; +use crate::core::PEER_VERSION_EPOCH_2_2; +use crate::core::PEER_VERSION_EPOCH_2_3; use crate::monitoring; use crate::net::asn::ASEntry4; use crate::net::codec::*; @@ -711,6 +713,17 @@ impl ConversationP2P { return true; } + // be a little more permissive with epochs 2.3 and 2.2, because 2.3.0.0.0 shipped with + // PEER_VERSION_MAINNET = 0x18000007 and PEER_VERSION_TESTNET = 0xfacade07 + if cur_epoch == PEER_VERSION_EPOCH_2_3 && remote_epoch == PEER_VERSION_EPOCH_2_2 { + debug!( + "Remote peer has epoch {} and current epoch is {}, but we're permissive about 2.2/2.3 boundary", + remote_epoch, + cur_epoch + ); + return true; + } + return false; } @@ -1074,7 +1087,6 @@ impl ConversationP2P { /// Handle an inbound NAT-punch request -- just tell the peer what we think their IP/port are. /// No authentication from the peer is necessary. 
fn handle_natpunch_request(&self, chain_view: &BurnchainView, nonce: u32) -> StacksMessage { - // monitoring::increment_p2p_msg_nat_punch_request_received_counter(); monitoring::increment_msg_counter("p2p_nat_punch_request".to_string()); let natpunch_data = NatPunchData { @@ -1243,7 +1255,6 @@ impl ConversationP2P { chain_view: &BurnchainView, message: &mut StacksMessage, ) -> Result, net_error> { - // monitoring::increment_p2p_msg_ping_received_counter(); monitoring::increment_msg_counter("p2p_ping".to_string()); let ping_data = match message.payload { @@ -1267,7 +1278,6 @@ impl ConversationP2P { chain_view: &BurnchainView, preamble: &Preamble, ) -> Result { - // monitoring::increment_p2p_msg_get_neighbors_received_counter(); monitoring::increment_msg_counter("p2p_get_neighbors".to_string()); let epoch = self.get_current_epoch(chain_view.burn_block_height); @@ -1476,7 +1486,6 @@ impl ConversationP2P { preamble: &Preamble, get_blocks_inv: &GetBlocksInv, ) -> Result { - // monitoring::increment_p2p_msg_get_blocks_inv_received_counter(); monitoring::increment_msg_counter("p2p_get_blocks_inv".to_string()); let mut response = ConversationP2P::make_getblocksinv_response( @@ -2040,7 +2049,6 @@ impl ConversationP2P { // already have public key; match payload let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { - // monitoring::increment_p2p_msg_authenticated_handshake_received_counter(); monitoring::increment_msg_counter("p2p_authenticated_handshake".to_string()); debug!("{:?}: Got Handshake", &self); @@ -2112,7 +2120,6 @@ impl ConversationP2P { let solicited = self.connection.is_solicited(&msg); let reply_opt = match msg.payload { StacksMessageType::Handshake(_) => { - // monitoring::increment_p2p_msg_unauthenticated_handshake_received_counter(); monitoring::increment_msg_counter("p2p_unauthenticated_handshake".to_string()); test_debug!("{:?}: Got unauthenticated Handshake", &self); let (reply_opt, handled) = @@ -2188,7 +2195,6 @@ impl ConversationP2P 
{ nack_payload, ); - // monitoring::increment_p2p_msg_nack_sent_counter(); monitoring::increment_msg_counter("p2p_nack_sent".to_string()); // unauthenticated, so don't forward it (but do consume it, and do nack it) diff --git a/src/net/inv.rs b/src/net/inv.rs index a4f7f1fcbb..51e05b6367 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -3113,9 +3113,11 @@ mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); @@ -3140,9 +3142,11 @@ mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); diff --git a/src/net/mod.rs b/src/net/mod.rs index caf988eac6..168881f866 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -2497,9 +2497,11 @@ pub mod test { 3, 25, 5, - u64::max_value(), - u64::max_value(), - u32::max_value(), + u64::MAX, + u64::MAX, + u32::MAX, + u32::MAX, + u32::MAX, ); let mut spending_account = TestMinerFactory::new().next_miner( diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 33f1757da3..48b8f6b131 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -5243,6 +5243,7 @@ impl PeerNetwork { if let Err(e) = mempool.submit( chainstate, + sortdb, consensus_hash, block_hash, &tx, diff --git a/src/net/relay.rs b/src/net/relay.rs index 98bc161afe..1d43c49efd 100644 --- a/src/net/relay.rs +++ b/src/net/relay.rs @@ -5821,6 +5821,7 @@ pub mod test { let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, @@ -5870,6 +5871,7 @@ pub mod test { let versioned_contract = 
(*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); match node.chainstate.will_admit_mempool_tx( + &sortdb.index_conn(), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 0e5d441cc1..f8257caee9 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -128,6 +128,9 @@ use crate::net::{RPCNeighbor, RPCNeighborsInfo}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::DBConn; use crate::util_lib::db::Error as db_error; + +use crate::chainstate::stacks::boot::POX_3_NAME; + use crate::{ chainstate::burn::operations::leader_block_commit::OUTPUTS_PER_COMMIT, types, util, util::hash::Sha256Sum, version_string, @@ -315,6 +318,13 @@ impl RPCPoxInfoData { ))? + 1; + let pox_3_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .ok_or(net_error::ChainstateError( + "PoX-3 first reward cycle begins before first burn block height".to_string(), + ))? 
+ + 1; + let data = chainstate .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( @@ -518,6 +528,14 @@ impl RPCPoxInfoData { as u64, first_reward_cycle_id: pox_2_first_cycle, }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_3_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_3_activation_height + as u64, + first_reward_cycle_id: pox_3_first_cycle, + }, ], }) } @@ -1338,6 +1356,7 @@ impl ConversationHttp { let key = ClarityDatabase::make_key_for_account_balance(&account); let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; let v1_unlock_height = clarity_db.get_v1_unlock_height(); + let v2_unlock_height = clarity_db.get_v2_unlock_height(); let (balance, balance_proof) = if with_proof { clarity_db .get_with_proof::(&key) @@ -1363,10 +1382,16 @@ impl ConversationHttp { .unwrap_or_else(|| (0, None)) }; - let unlocked = balance - .get_available_balance_at_burn_block(burn_block_height, v1_unlock_height); - let (locked, unlock_height) = balance - .get_locked_balance_at_burn_block(burn_block_height, v1_unlock_height); + let unlocked = balance.get_available_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); + let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( + burn_block_height, + v1_unlock_height, + v2_unlock_height, + ); let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); let locked = format!("0x{}", to_hex(&locked.to_be_bytes())); @@ -1419,15 +1444,15 @@ impl ConversationHttp { var_name, ); - let (value, marf_proof) = if with_proof { + let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_with_proof::(&key) + .get_with_proof(&key) .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { - clarity_db.get::(&key).map(|a| (a, None))? + clarity_db.get(&key).map(|a| (a, None))? 
}; - let data = format!("0x{}", value.serialize()); + let data = format!("0x{}", value_hex); Some(DataVarResponse { data, marf_proof }) }) }) { @@ -1472,25 +1497,22 @@ impl ConversationHttp { map_name, key, ); - let (value, marf_proof) = if with_proof { + let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_with_proof::(&key) + .get_with_proof(&key) .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| { test_debug!("No value for '{}' in {}", &key, tip); - (Value::none(), Some("".into())) + (Value::none().serialize_to_hex(), Some("".into())) }) } else { - clarity_db - .get::(&key) - .map(|a| (a, None)) - .unwrap_or_else(|| { - test_debug!("No value for '{}' in {}", &key, tip); - (Value::none(), None) - }) + clarity_db.get(&key).map(|a| (a, None)).unwrap_or_else(|| { + test_debug!("No value for '{}' in {}", &key, tip); + (Value::none().serialize_to_hex(), None) + }) }; - let data = format!("0x{}", value.serialize()); + let data = format!("0x{}", value_hex); MapEntryResponse { data, marf_proof } }) }) { @@ -1584,7 +1606,7 @@ impl ConversationHttp { response_metadata, CallReadOnlyResponse { okay: true, - result: Some(format!("0x{}", data.serialize())), + result: Some(format!("0x{}", data.serialize_to_hex())), cause: None, }, ), @@ -2171,6 +2193,7 @@ impl ConversationHttp { } else { match mempool.submit( chainstate, + sortdb, &consensus_hash, &block_hash, &tx, diff --git a/src/util_lib/bloom.rs b/src/util_lib/bloom.rs index e899236566..2d3e40beab 100644 --- a/src/util_lib/bloom.rs +++ b/src/util_lib/bloom.rs @@ -360,15 +360,15 @@ impl BloomCounter { let sql = format!("CREATE TABLE IF NOT EXISTS {}(counts BLOB NOT NULL, num_bins INTEGER NOT NULL, num_hashes INTEGER NOT NULL, hasher BLOB NOT NULL);", table_name); tx.execute(&sql, NO_PARAMS).map_err(db_error::SqliteError)?; - let (num_bits, num_hashes) = bloom_hash_count(error_rate, max_items); - let counts_vec = vec![0u8; (num_bits * 4) as usize]; + let (num_bins, num_hashes) = 
bloom_hash_count(error_rate, max_items); + let counts_vec = vec![0u8; (num_bins * 4) as usize]; let hasher_vec = hasher.serialize_to_vec(); let sql = format!( "INSERT INTO {} (counts, num_bins, num_hashes, hasher) VALUES (?1, ?2, ?3, ?4)", table_name ); - let args: &[&dyn ToSql] = &[&counts_vec, &num_bits, &num_hashes, &hasher_vec]; + let args: &[&dyn ToSql] = &[&counts_vec, &num_bins, &num_hashes, &hasher_vec]; tx.execute(&sql, args).map_err(db_error::SqliteError)?; @@ -379,7 +379,7 @@ impl BloomCounter { Ok(BloomCounter { hasher, table_name: table_name.to_string(), - num_bins: num_bits, + num_bins: num_bins, num_hashes, counts_rowid: counts_rowid as u32, }) @@ -528,8 +528,6 @@ impl BloomCounter { let new_bin = bin - 1; BloomCounter::::set_counts_bin(&mut fd, slot, new_bin); count = cmp::min(new_bin, count); - } else { - panic!("BUG: item is present in the bloom counter, but has a zero count (i = {}, slot = {})", i, slot); } } diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index a962386b5d..19cd58172e 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -37,7 +37,7 @@ version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] -version = "0.21.0" +version = "0.24.3" features = ["serde", "recovery"] [dependencies.rusqlite] diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index c065a33868..087251f719 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -71,11 +71,28 @@ pub enum StacksEpochId { Epoch20 = 0x02000, Epoch2_05 = 0x02005, Epoch21 = 0x0200a, + Epoch22 = 0x0200f, + Epoch23 = 0x02014, + Epoch24 = 0x02019, } impl StacksEpochId { pub fn latest() -> StacksEpochId { - StacksEpochId::Epoch21 + StacksEpochId::Epoch24 + } + + /// Returns whether or not this Epoch should perform + /// Clarity value sanitization + pub fn value_sanitizing(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | 
StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 => false, + StacksEpochId::Epoch24 => true, + } } } @@ -86,6 +103,9 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch20 => write!(f, "2.0"), StacksEpochId::Epoch2_05 => write!(f, "2.05"), StacksEpochId::Epoch21 => write!(f, "2.1"), + StacksEpochId::Epoch22 => write!(f, "2.2"), + StacksEpochId::Epoch23 => write!(f, "2.3"), + StacksEpochId::Epoch24 => write!(f, "2.4"), } } } @@ -99,6 +119,9 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch20 as u32 => Ok(StacksEpochId::Epoch20), x if x == StacksEpochId::Epoch2_05 as u32 => Ok(StacksEpochId::Epoch2_05), x if x == StacksEpochId::Epoch21 as u32 => Ok(StacksEpochId::Epoch21), + x if x == StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), + x if x == StacksEpochId::Epoch23 as u32 => Ok(StacksEpochId::Epoch23), + x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), _ => Err("Invalid epoch"), } } diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 7b83d48739..589fc398c6 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -652,8 +652,5 @@ macro_rules! impl_byte_array_rusqlite_only { macro_rules! 
function_name { () => { stdext::function_name!() - .rsplit_once("::") - .expect("Failed to split current function name") - .1 }; } diff --git a/testnet/stacks-node/conf/mainnet-follower-conf.toml b/testnet/stacks-node/conf/mainnet-follower-conf.toml index f6b283e38e..9f992a7e2c 100644 --- a/testnet/stacks-node/conf/mainnet-follower-conf.toml +++ b/testnet/stacks-node/conf/mainnet-follower-conf.toml @@ -2,7 +2,7 @@ # working_dir = "/dir/to/save/chainstate" rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mainnet-miner-conf.toml b/testnet/stacks-node/conf/mainnet-miner-conf.toml index 1769178740..319328b677 100644 --- a/testnet/stacks-node/conf/mainnet-miner-conf.toml +++ b/testnet/stacks-node/conf/mainnet-miner-conf.toml @@ -5,7 +5,7 @@ p2p_bind = "0.0.0.0:20444" seed = "" local_peer_seed = "" miner = true -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml index 9283a6ca95..37381a60af 100644 --- a/testnet/stacks-node/conf/mainnet-mockminer-conf.toml +++ 
b/testnet/stacks-node/conf/mainnet-mockminer-conf.toml @@ -4,7 +4,7 @@ rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" miner = true mock_mining = true -bootstrap_node = "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444,02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444,03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444" +bootstrap_node = "02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444" [burnchain] chain = "bitcoin" diff --git a/testnet/stacks-node/conf/mocknet-miner-conf.toml b/testnet/stacks-node/conf/mocknet-miner-conf.toml index 31d8ea830a..71add782b1 100644 --- a/testnet/stacks-node/conf/mocknet-miner-conf.toml +++ b/testnet/stacks-node/conf/mocknet-miner-conf.toml @@ -8,6 +8,9 @@ miner = true wait_time_for_microblocks = 10000 use_test_genesis_chainstate = true +[connection_options] +public_ip_address = "127.0.0.1:20444" + [burnchain] chain = "bitcoin" mode = "mocknet" diff --git a/testnet/stacks-node/conf/testnet-follower-conf.toml b/testnet/stacks-node/conf/testnet-follower-conf.toml index e1563d333a..6872666a2c 100644 --- a/testnet/stacks-node/conf/testnet-follower-conf.toml +++ b/testnet/stacks-node/conf/testnet-follower-conf.toml @@ -2,7 +2,7 @@ # working_dir = "/dir/to/save/chainstate" rpc_bind = "0.0.0.0:20443" p2p_bind = "0.0.0.0:20444" -bootstrap_node = "047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@testnet.stacks.co:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444" wait_time_for_microblocks = 10000 [burnchain] diff --git a/testnet/stacks-node/conf/testnet-miner-conf.toml b/testnet/stacks-node/conf/testnet-miner-conf.toml index 3b1b0013e3..379cbd3822 100644 --- 
a/testnet/stacks-node/conf/testnet-miner-conf.toml +++ b/testnet/stacks-node/conf/testnet-miner-conf.toml @@ -5,7 +5,7 @@ p2p_bind = "0.0.0.0:20444" seed = "" local_peer_seed = "" miner = true -bootstrap_node = "047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@testnet.stacks.co:20444" +bootstrap_node = "029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444" wait_time_for_microblocks = 10000 [burnchain] diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 7644af1f6b..7ae5db795b 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -13,7 +13,10 @@ use stacks::chainstate::burn::operations::{ TransferStxOp, UserBurnSupportOp, }; use stacks::chainstate::burn::BlockSnapshot; -use stacks::core::{StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, STACKS_EPOCH_MAX}; +use stacks::core::{ + StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, STACKS_EPOCH_MAX, +}; use stacks::types::chainstate::{BurnchainHeaderHash, PoxId}; use stacks::util::get_epoch_time_secs; use stacks::util::hash::Sha256Sum; @@ -100,13 +103,29 @@ impl BurnchainController for MocknetController { fn get_stacks_epochs(&self) -> Vec { match &self.config.burnchain.epochs { Some(epochs) => epochs.clone(), - None => vec![StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: STACKS_EPOCH_MAX, - block_limit: ExecutionCost::max_value(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }], + None => vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 1, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 1, + 
end_height: 2, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 2, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + ], } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d3c03f53f0..782f1adea7 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -206,7 +206,7 @@ impl ConfigFile { }; let node = NodeConfigFile { - bootstrap_node: Some("047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@xenon.blockstack.org:20444".to_string()), + bootstrap_node: Some("029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:20444".to_string()), miner: Some(false), ..NodeConfigFile::default() }; @@ -250,13 +250,8 @@ impl ConfigFile { ..BurnchainConfigFile::default() }; - let bootstrap_nodes = [ - "02da7a464ac770ae8337a343670778b93410f2f3fef6bea98dd1c3e9224459d36b@seed-0.mainnet.stacks.co:20444", - "02afeae522aab5f8c99a00ddf75fbcb4a641e052dd48836408d9cf437344b63516@seed-1.mainnet.stacks.co:20444", - "03652212ea76be0ed4cd83a25c06e57819993029a7b9999f7d63c36340b34a4e62@seed-2.mainnet.stacks.co:20444"].join(","); - let node = NodeConfigFile { - bootstrap_node: Some(bootstrap_nodes), + bootstrap_node: Some("02196f005965cebe6ddc3901b7b1cc1aa7a88f305bb8c5893456b8f9a605923893@seed.mainnet.hiro.so:20444".to_string()), miner: Some(false), ..NodeConfigFile::default() }; @@ -413,6 +408,34 @@ impl Config { burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } + if let Some(epochs) = &self.burnchain.epochs { + // Iterate through the epochs vector and find the item where epoch_id == StacksEpochId::Epoch22 + if let Some(epoch) = epochs + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch22) + { + // Override 
v2_unlock_height to the start_height of epoch2.2 + debug!( + "Override v2_unlock_height from {} to {}", + burnchain.pox_constants.v2_unlock_height, + epoch.start_height + 1 + ); + burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32 + 1; + } + + if let Some(epoch) = epochs + .iter() + .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch24) + { + // Override pox_3_activation_height to the start_height of epoch2.4 + debug!( + "Override pox_3_activation_height from {} to {}", + burnchain.pox_constants.pox_3_activation_height, epoch.start_height + ); + burnchain.pox_constants.pox_3_activation_height = epoch.start_height as u32; + } + } + if let Some(sunset_start) = self.burnchain.sunset_start { debug!( "Override sunset_start from {} to {}", @@ -512,6 +535,12 @@ impl Config { Ok(StacksEpochId::Epoch2_05) } else if epoch_name == EPOCH_CONFIG_2_1_0 { Ok(StacksEpochId::Epoch21) + } else if epoch_name == EPOCH_CONFIG_2_2_0 { + Ok(StacksEpochId::Epoch22) + } else if epoch_name == EPOCH_CONFIG_2_3_0 { + Ok(StacksEpochId::Epoch23) + } else if epoch_name == EPOCH_CONFIG_2_4_0 { + Ok(StacksEpochId::Epoch24) } else { Err(format!("Unknown epoch name specified: {}", epoch_name)) }?; @@ -534,6 +563,9 @@ impl Config { StacksEpochId::Epoch20, StacksEpochId::Epoch2_05, StacksEpochId::Epoch21, + StacksEpochId::Epoch22, + StacksEpochId::Epoch23, + StacksEpochId::Epoch24, ]; for (expected_epoch, configured_epoch) in expected_list .iter() @@ -670,6 +702,9 @@ impl Config { // chainstate fault_injection activation for hide_blocks. // you can't set this in the config file. 
fault_injection_hide_blocks: false, + chain_liveness_poll_time_secs: node + .chain_liveness_poll_time_secs + .unwrap_or(default_node_config.chain_liveness_poll_time_secs), }; (node_config, node.bootstrap_node, node.deny_nodes) } @@ -871,6 +906,9 @@ impl Config { candidate_retry_cache_size: miner .candidate_retry_cache_size .unwrap_or(miner_default_config.candidate_retry_cache_size), + unprocessed_block_deadline_secs: miner + .unprocessed_block_deadline_secs + .unwrap_or(miner_default_config.unprocessed_block_deadline_secs), }, None => miner_default_config, }; @@ -1395,6 +1433,9 @@ pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; #[derive(Clone, Deserialize, Default, Debug)] pub struct BurnchainConfigFile { @@ -1455,6 +1496,9 @@ pub struct NodeConfig { // fault injection for hiding blocks. // not part of the config file. pub fault_injection_hide_blocks: bool, + /// At most, how often should the chain-liveness thread + /// wake up the chains-coordinator. Defaults to 300s (5 min). 
+ pub chain_liveness_poll_time_secs: u64, } #[derive(Clone, Debug)] @@ -1729,9 +1773,10 @@ impl NodeConfig { marf_defer_hashing: true, pox_sync_sample_secs: 30, use_test_genesis_chainstate: None, - always_use_affirmation_maps: true, + always_use_affirmation_maps: false, require_affirmed_anchor_blocks: true, fault_injection_hide_blocks: false, + chain_liveness_poll_time_secs: 300, } } @@ -1844,6 +1889,7 @@ pub struct MinerConfig { pub wait_for_block_download: bool, pub nonce_cache_size: u64, pub candidate_retry_cache_size: u64, + pub unprocessed_block_deadline_secs: u64, } impl MinerConfig { @@ -1859,6 +1905,7 @@ impl MinerConfig { wait_for_block_download: true, nonce_cache_size: 10_000, candidate_retry_cache_size: 10_000, + unprocessed_block_deadline_secs: 30, } } } @@ -1933,6 +1980,9 @@ pub struct NodeConfigFile { pub use_test_genesis_chainstate: Option, pub always_use_affirmation_maps: Option, pub require_affirmed_anchor_blocks: Option, + /// At most, how often should the chain-liveness thread + /// wake up the chains-coordinator. Defaults to 300s (5 min). 
+ pub chain_liveness_poll_time_secs: Option, } #[derive(Clone, Deserialize, Debug)] @@ -1971,6 +2021,7 @@ pub struct MinerConfigFile { pub segwit: Option, pub nonce_cache_size: Option, pub candidate_retry_cache_size: Option, + pub unprocessed_block_deadline_secs: Option, } #[derive(Clone, Deserialize, Default, Debug)] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 05cd9a4a5e..874e20569b 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -393,6 +393,7 @@ impl EventObserver { "anchored_cost": anchored_consumed, "confirmed_microblocks_cost": mblock_confirmed_consumed, "pox_v1_unlock_height": pox_constants.v1_unlock_height, + "pox_v2_unlock_height": pox_constants.v2_unlock_height, }) } } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 5ee9caf838..4764f56e70 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -179,7 +179,7 @@ use stacks::chainstate::stacks::{ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; -use stacks::core::STACKS_EPOCH_2_1_MARKER; +use stacks::core::STACKS_EPOCH_2_4_MARKER; use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -1323,7 +1323,7 @@ impl BlockMinerThread { apparent_sender: sender, key_block_ptr: key.block_height as u32, key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_1_MARKER], + memo: vec![STACKS_EPOCH_2_4_MARKER], new_seed: vrf_seed, parent_block_ptr, parent_vtxindex, @@ -1591,6 +1591,7 @@ impl BlockMinerThread { fn load_and_vet_parent_microblocks( &mut self, chain_state: &mut StacksChainState, + sortdb: &SortitionDB, mem_pool: &mut MemPoolDB, parent_block_info: &mut ParentStacksBlockInfo, ) -> Option> { @@ -1659,6 +1660,7 @@ impl 
BlockMinerThread { // anchored block. if let Err(e) = mem_pool.miner_submit( chain_state, + sortdb, &parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, @@ -1755,6 +1757,7 @@ impl BlockMinerThread { burnchain: &Burnchain, sortdb: &SortitionDB, chainstate: &StacksChainState, + unprocessed_block_deadline: u64, ) -> bool { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: could not query canonical sortition DB tip"); @@ -1763,13 +1766,21 @@ impl BlockMinerThread { .get_stacks_chain_tip(sortdb) .expect("FATAL: could not query canonical Stacks chain tip") { - let has_unprocessed = - StacksChainState::has_higher_unprocessed_blocks(chainstate.db(), stacks_tip.height) - .expect("FATAL: failed to query staging blocks"); + // if a block hasn't been processed within some deadline seconds of receipt, don't block + // mining + let process_deadline = get_epoch_time_secs() - unprocessed_block_deadline; + let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( + chainstate.db(), + stacks_tip.height, + process_deadline, + ) + .expect("FATAL: failed to query staging blocks"); if has_unprocessed { - let highest_unprocessed_opt = - StacksChainState::get_highest_unprocessed_block(chainstate.db()) - .expect("FATAL: failed to query staging blocks"); + let highest_unprocessed_opt = StacksChainState::get_highest_unprocessed_block( + chainstate.db(), + process_deadline, + ) + .expect("FATAL: failed to query staging blocks"); if let Some(highest_unprocessed) = highest_unprocessed_opt { let highest_unprocessed_block_sn_opt = @@ -1886,6 +1897,7 @@ impl BlockMinerThread { // target it to the microblock tail in parent_block_info let microblocks_opt = self.load_and_vet_parent_microblocks( &mut chain_state, + &burn_db, &mut mem_pool, &mut parent_block_info, ); @@ -2005,8 +2017,12 @@ impl BlockMinerThread { .expect("FATAL: mutex poisoned") .is_blocked(); - let has_unprocessed = - 
Self::unprocessed_blocks_prevent_mining(&self.burnchain, &burn_db, &chain_state); + let has_unprocessed = Self::unprocessed_blocks_prevent_mining( + &self.burnchain, + &burn_db, + &chain_state, + self.config.miner.unprocessed_block_deadline_secs, + ); if stacks_tip.anchored_block_hash != anchored_block.header.parent_block || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash || cur_burn_chain_tip.burn_header_hash != self.burn_block.burn_header_hash @@ -2971,6 +2987,7 @@ impl RelayerThread { &self.burnchain, self.sortdb_ref(), self.chainstate_ref(), + self.config.miner.unprocessed_block_deadline_secs, ); if has_unprocessed { debug!( diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 88a3ab73ef..3fabdefa1c 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -624,7 +624,7 @@ impl RunLoop { last_stacks_pox_reorg_recover_time: &mut u128, ) { let delay = cmp::max( - 1, + config.node.chain_liveness_poll_time_secs, cmp::max( config.miner.first_attempt_time_ms, config.miner.subsequent_attempt_time_ms, @@ -719,7 +719,9 @@ impl RunLoop { &stacks_tip_affirmation_map, &heaviest_affirmation_map ); - // do it anyway since it's harmless + // announce a new stacks block to force the chains coordinator + // to wake up anyways. 
this isn't free, so we have to make sure + // the chain-liveness thread doesn't wake up too often globals.coord().announce_new_stacks_block(); } @@ -742,7 +744,7 @@ impl RunLoop { last_announce_time: &mut u128, ) { let delay = cmp::max( - 1, + config.node.chain_liveness_poll_time_secs, cmp::max( config.miner.first_attempt_time_ms, config.miner.subsequent_attempt_time_ms, diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index d23302f221..7d0e0fcbaf 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -1,6 +1,10 @@ use std::thread; use std::time::{Duration, Instant}; +#[cfg(test)] +use stacks::burnchains::PoxConstants; +#[cfg(test)] +use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::db::sortdb::SortitionDBConn; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ @@ -123,4 +127,14 @@ impl<'a> Tenure { .unwrap(); chain_state } + + #[cfg(test)] + pub fn open_fake_sortdb(&self) -> SortitionDB { + SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + PoxConstants::testnet_default(), + ) + .unwrap() + } } diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index a4bf3c7ed0..4fb199848f 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -371,6 +371,8 @@ fn bitcoind_integration(segwit_flag: bool) { // Use tenure's hook for submitting transactions run_loop.callbacks.on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); + match round { 1 => { // On round 1, publish the KV contract @@ -390,7 +392,7 @@ fn bitcoind_integration(segwit_flag: bool) { // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar let header_hash = 
chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 2 => { @@ -399,7 +401,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 3 => { @@ -408,7 +410,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), + 
tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 4 => { @@ -417,7 +419,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000003000000000000000a010046c2c1c345231443fef9a1f64fccfef3e1deacc342b2ab5f97612bb3742aa799038b20aea456789aca6b883e52f84a31adfee0bc2079b740464877af8f2f87d2030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, 5 => { @@ -426,7 +428,7 @@ fn bitcoind_integration(segwit_flag: bool) { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let transfer_1000_stx = "80800000000400b71a091b4b8b7661a661c620966ab6573bc2dcd30000000000000000000000000000000a0000393810832bacd44cfc4024980876135de6b95429bdb610d5ce96a92c9ee9bfd81ec77ea0f1748c8515fc9a1589e51d8b92bf028e3e84ade1249682c05271d5b803020000000000051a525b8a36ef8a73548cd0940c248d3b71ecf4a45100000000000003e800000000000000000000000000000000000000000000000000000000000000000000"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20,).unwrap(); }, _ => 
{} diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 0dce5a3f8f..cebf84b0f9 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -114,6 +114,8 @@ fn advance_to_2_1( u64::max_value() - 2, u64::max_value() - 1, u32::max_value(), + u32::MAX, + u32::MAX, )); burnchain_config.pox_constants = pox_constants.clone(); @@ -431,9 +433,12 @@ fn transition_adds_burn_block_height() { .unwrap(), ) .unwrap(); - let clarity_value = - Value::deserialize_read(&mut &clarity_serialized_value[..], None) - .unwrap(); + let clarity_value = Value::deserialize_read( + &mut &clarity_serialized_value[..], + None, + false, + ) + .unwrap(); let pair = clarity_value.expect_tuple(); let height = pair.get("height").unwrap().clone().expect_u128() as u64; let bhh_opt = @@ -608,6 +613,8 @@ fn transition_fixes_bitcoin_rigidity() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::max_value(), + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1050,6 +1057,8 @@ fn transition_adds_get_pox_addr_recipients() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1256,9 +1265,12 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(), ) .unwrap(); - let clarity_value = - Value::deserialize_read(&mut &clarity_serialized_value[..], None) - .unwrap(); + let clarity_value = Value::deserialize_read( + &mut &clarity_serialized_value[..], + None, + false, + ) + .unwrap(); let pair = clarity_value.expect_tuple(); let burn_block_height = pair.get("burn-height").unwrap().clone().expect_u128() as u64; @@ -1351,6 +1363,8 @@ fn transition_adds_mining_from_segwit() { u64::MAX, u64::MAX, v1_unlock_height, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1514,6 +1528,8 @@ fn transition_removes_pox_sunset() { (sunset_start_rc * reward_cycle_len - 1).into(), (sunset_end_rc * 
reward_cycle_len).into(), (epoch_21 as u32) + 1, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1794,6 +1810,8 @@ fn transition_empty_blocks() { u64::max_value() - 2, u64::max_value() - 1, (epoch_2_1 + 1) as u32, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1968,7 +1986,7 @@ fn transition_empty_blocks() { } /// Check to see if there are stragglers between a set of nodes syncing -fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: u64) { +pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: u64) { loop { let mut straggler = false; let mut stacks_tip_ch = None; @@ -2151,6 +2169,8 @@ fn test_pox_reorgs_three_flaps() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2686,6 +2706,8 @@ fn test_pox_reorg_one_flap() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3109,6 +3131,8 @@ fn test_pox_reorg_flap_duel() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3542,6 +3566,8 @@ fn test_pox_reorg_flap_reward_cycles() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3969,6 +3995,8 @@ fn test_pox_missing_five_anchor_blocks() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4368,6 +4396,8 @@ fn test_sortition_divergence_pre_21() { (1600 * reward_cycle_len - 1).into(), (1700 * reward_cycle_len).into(), 
v1_unlock_height, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4700,7 +4730,7 @@ fn trait_invocation_cross_epoch() { test_observer::spawn(); - let (mut conf, miner_account) = neon_integration_test_conf(); + let (mut conf, _) = neon_integration_test_conf(); let mut initial_balances = vec![InitialBalance { address: spender_addr.clone(), amount: 200_000_000, @@ -4717,8 +4747,6 @@ fn trait_invocation_cross_epoch() { epochs[3].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 2000; @@ -4732,6 +4760,8 @@ fn trait_invocation_cross_epoch() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::max_value(), + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4976,6 +5006,8 @@ fn test_v1_unlock_height_with_current_stackers() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height as u32, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5236,6 +5268,8 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { u64::max_value() - 2, u64::max_value() - 1, v1_unlock_height as u32, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs new file mode 100644 index 0000000000..2a42c7f083 --- /dev/null +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -0,0 +1,1680 @@ +use std::collections::HashMap; +use std::env; +use std::thread; + +use stacks::burnchains::Burnchain; +use stacks::chainstate::stacks::address::PoxAddress; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::miner::signal_mining_blocked; +use stacks::chainstate::stacks::miner::signal_mining_ready; +use stacks::core::STACKS_EPOCH_MAX; +use 
stacks::types::chainstate::StacksAddress; +use stacks::types::PrivateKey; + +use crate::config::EventKeyType; +use crate::config::EventObserverConfig; +use crate::config::InitialBalance; +use crate::neon; +use crate::neon_node::StacksNode; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::epoch_21::wait_pox_stragglers; +use crate::tests::neon_integrations::*; +use crate::tests::*; +use crate::BitcoinRegtestController; +use crate::BurnchainController; +use stacks::core; + +use super::neon_integrations::get_account; +use crate::stacks_common::types::Address; +use crate::stacks_common::util::hash::bytes_to_hex; +use stacks::burnchains::PoxConstants; + +use stacks_common::util::hash::Hash160; +use stacks_common::util::secp256k1::Secp256k1PublicKey; + +use stacks::clarity_cli::vm_execute as execute; + +use clarity::vm::types::PrincipalData; +use clarity::vm::ClarityVersion; + +use stacks::util::sleep_ms; + +use stacks::util_lib::boot::boot_code_id; +use stacks_common::types::chainstate::StacksBlockId; + +#[test] +#[ignore] +/// Verify that it is acceptable to launch PoX-2 at the end of a reward cycle, and set v1 unlock +/// height to be at the start of the subsequent reward cycle. +/// +/// Verify that PoX-1 stackers continue to receive PoX payouts after v1 unlock height, and that +/// PoX-2 stackers only begin receiving rewards at the start of the reward cycle following the one +/// that contains v1 unlock height. +/// +/// Verify that both of the above work even if miners do not mine in the same block as the PoX-2 +/// start height or v1 unlock height (e.g. suppose there's a delay). +/// +/// Verify the (buggy) stacks-increase behavior in PoX-2, and then verify that Epoch-2.2 +/// **disables** PoX after it activates. +/// +/// Verification works using expected number of slots for burn and various PoX addresses. 
+/// +fn disable_pox() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let spender_3_sk = StacksPrivateKey::new(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + increase_by + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + // // create a third initial balance so that there's more liquid ustx than the stacked amount bug. + // // otherwise, it surfaces the DoS vector. 
+ initial_balances.push(InitialBalance { + address: spender_3_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = 
epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = 
channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(increase_by.into())], + ); + + info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..15 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + 
+ // invoke stack-increase again, in Epoch-2.2, it should + // runtime abort + let aborted_increase_nonce = 3; + let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // finish the cycle after the 2.2 transition, + // and mine two more cycles + for _i in 0..25 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? 
pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), 
(burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet) and 2 burn slots + ( + 24, + HashMap::from([ + (pox_addr_2.clone(), 6u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 2), + ]), + ), + // stack-increase has been invoked, and so the reward set is skewed. + // pox_addr_2 should get the majority of slots (~ 67%) + ( + 25, + HashMap::from([ + (pox_addr_2.clone(), 9u64), + (pox_addr_3.clone(), 4), + (burn_pox_addr.clone(), 1), + ]), + ), + // Epoch 2.2 has started, so the reward set should be all burns. + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut abort_tested = false; + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = 
PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == aborted_increase_nonce + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "pox-2"); + assert_eq!(contract_call.function_name.as_str(), "stack-increase"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), "(err none)"); + abort_tested = true; + } + } + } + + assert!(abort_tested, "The stack-increase transaction must have been aborted, and it must have been tested in the tx receipts"); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +#[test] +#[ignore] +/// Verify that it is acceptable to launch PoX-2 at the end of a reward cycle, and set v1 unlock +/// height to be at the start of the subsequent reward cycle. +/// +/// Verify that PoX-1 stackers continue to receive PoX payouts after v1 unlock height, and that +/// PoX-2 stackers only begin receiving rewards at the start of the reward cycle following the one +/// that contains v1 unlock height. +/// +/// Verify that both of the above work even if miners do not mine in the same block as the PoX-2 +/// start height or v1 unlock height (e.g. suppose there's a delay). 
+/// +/// Verify that pox-2 locked funds unlock in Epoch-2.2 +/// +fn pox_2_unlock_all() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 5; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 222; + let v1_unlock_height = epoch_2_1 + 1; + let epoch_2_2 = 239; // one block before a prepare phase + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let spender_3_sk = StacksPrivateKey::new(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); + + let mut initial_balances = vec![]; + + let spender_1_initial_balance = stacked + 100_000; + let spender_2_initial_balance = stacked + 100_000; + let tx_fee = 3000; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = 
neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + 
btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + tx_fee, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + 
btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_publish( + &spender_sk, + 1, + tx_fee, + "unlock-height", + "(define-public (unlock-height (x principal)) (ok (get unlock-height (stx-account x))))", + ); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_sk, + 2, + tx_fee, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + tx_fee, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + + // advance to 3 blocks before 2.2 activation + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_2 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + let tx = 
make_contract_call( + &spender_sk, + 3, + tx_fee, + &to_addr(&spender_sk), + "unlock-height", + "unlock-height", + &[spender_addr.clone().into()], + ); + + submit_tx(&http_origin, &tx); + let nonce_of_2_1_unlock_ht_call = 3; + // this mines bitcoin block epoch_2_2 - 2, and causes + // the stacks-node to mine the stacks block which will be included + // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` + // will be included in that bitcoin block. + // this will build the last block before 2.2 activates + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + let tx = make_contract_call( + &spender_sk, + 4, + tx_fee, + &to_addr(&spender_sk), + "unlock-height", + "unlock-height", + &[spender_addr.clone().into()], + ); + + submit_tx(&http_origin, &tx); + let nonce_of_2_2_unlock_ht_call = 4; + + // this mines bitcoin block epoch_2_2 - 1, and causes + // the stacks-node to mine the stacks block which will be included + // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` + // will be included in that bitcoin block. 
+ // this block activates 2.2 + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + // this *burn block* is when the unlock occurs + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + // and this will mine the first block whose parent is the unlock block + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + assert_eq!( + spender_1_account.balance as u64, + spender_1_initial_balance - stacked - (5 * tx_fee), + "Spender 1 should still be locked" + ); + assert_eq!( + spender_1_account.locked as u64, stacked, + "Spender 1 should still be locked" + ); + assert_eq!( + spender_1_account.nonce, 5, + "Spender 1 should have 4 accepted transactions" + ); + + assert_eq!( + spender_2_account.balance as u64, + spender_2_initial_balance - stacked - (1 * tx_fee), + "Spender 2 should still be locked" + ); + assert_eq!( + spender_2_account.locked as u64, stacked, + "Spender 2 should still be locked" + ); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + + // and this will mice the bitcoin block containing the first block whose parent has >= unlock burn block + // (which is the criterion for the unlock) + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + assert_eq!( + spender_1_account.balance, + spender_1_initial_balance as u128 - (5 * tx_fee as u128), + "Spender 1 should be unlocked" + ); + assert_eq!(spender_1_account.locked, 
0, "Spender 1 should be unlocked"); + assert_eq!( + spender_1_account.nonce, 5, + "Spender 1 should have 5 accepted transactions" + ); + + assert_eq!( + spender_2_account.balance, + spender_2_initial_balance as u128 - (1 * tx_fee as u128), + "Spender 2 should be unlocked" + ); + assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + + // perform a transfer + let tx = make_stacks_transfer(&spender_sk, 5, tx_fee, &spender_3_addr, 1_000_000); + + info!("Submit stack transfer tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // this wakes up the node to mine the transaction + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + // this block selects the previously mined block + next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + + let spender_1_account = get_account(&http_origin, &spender_addr); + let spender_2_account = get_account(&http_origin, &spender_2_addr); + let spender_3_account = get_account(&http_origin, &spender_3_addr); + + info!("spender_1_account = {:?}", spender_1_account); + info!("spender_2_account = {:?}", spender_1_account); + + assert_eq!( + spender_3_account.balance, 1_000_000, + "Recipient account should have funds" + ); + assert_eq!( + spender_3_account.locked, 0, + "Burn account should be unlocked" + ); + assert_eq!( + spender_3_account.nonce, 0, + "Burn should have no accepted transactions" + ); + + assert_eq!( + spender_1_account.balance, + spender_1_initial_balance as u128 - (6 * tx_fee as u128) - 1_000_000, + "Spender 1 should be unlocked" + ); + assert_eq!(spender_1_account.locked, 0, "Spender 1 should be unlocked"); + assert_eq!( + spender_1_account.nonce, 6, + "Spender 1 should have three accepted transactions" + ); + + assert_eq!( + spender_2_account.balance, + spender_2_initial_balance as u128 - (1 * tx_fee as u128), + "Spender 2 should be unlocked" + ); + 
assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); + assert_eq!( + spender_2_account.nonce, 1, + "Spender 2 should have two accepted transactions" + ); + + // finish the cycle after the 2.2 transition, + // and mine two more cycles + for _i in 0..10 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? 
pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + (42u64, HashMap::from([(pox_addr_1.clone(), 4u64)])), + (43, HashMap::from([(pox_addr_1.clone(), 4)])), + (44, HashMap::from([(pox_addr_1.clone(), 4)])), + 
// cycle 45 is the first 2.1, and in the setup of this test, there's not + // enough time for the stackers to begin in this cycle + (45, HashMap::from([(burn_pox_addr.clone(), 4)])), + (46, HashMap::from([(burn_pox_addr.clone(), 4)])), + ( + 47, + HashMap::from([(pox_addr_2.clone(), 2), (pox_addr_3.clone(), 2)]), + ), + // Now 2.2 is active, everything should be a burn. + (48, HashMap::from([(burn_pox_addr.clone(), 4)])), + (49, HashMap::from([(burn_pox_addr.clone(), 4)])), + (50, HashMap::from([(burn_pox_addr.clone(), 4)])), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { + Some(x) => x, + None => { + info!("No reward cycle entry = {}", reward_cycle); + continue; + } + }; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut unlock_ht_22_tested = false; + let mut unlock_ht_21_tested = false; + + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == 
nonce_of_2_2_unlock_ht_call + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "unlock-height"); + assert_eq!(contract_call.function_name.as_str(), "unlock-height"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); + unlock_ht_22_tested = true; + } + if &tx_sender == &spender_addr + && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "unlock-height"); + assert_eq!(contract_call.function_name.as_str(), "unlock-height"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), format!("(ok u{})", 230 + 60)); + unlock_ht_21_tested = true; + } + } + } + + assert!(unlock_ht_21_tested); + assert!(unlock_ht_22_tested); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +/// PoX reorg with just one flap. Epoch 2.2 activates during bootup +/// Miner 0 mines and hides the anchor block for cycle 22. +/// Miner 1 mines and hides the anchor block for cycle 23, causing a PoX reorg in miner 0. +/// At the very end, miners stop hiding their blocks, and the test verifies that both miners +/// converge on having anchor blocks for cycles 22 and 24, but not 23. 
+#[test] +#[ignore] +fn test_pox_reorg_one_flap() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_miners = 2; + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let v1_unlock_height = 152; + let epoch_2_2 = 175; + let v2_unlock_height = epoch_2_2 + 1; + + let (mut conf_template, _) = neon_integration_test_conf(); + let block_time_ms = 10_000; + conf_template.node.mine_microblocks = true; + conf_template.miner.microblock_attempt_time_ms = 2_000; + conf_template.node.wait_time_for_microblocks = 0; + conf_template.node.microblock_frequency = 0; + conf_template.miner.first_attempt_time_ms = 2_000; + conf_template.miner.subsequent_attempt_time_ms = 5_000; + conf_template.burnchain.max_rbf = 1000000; + conf_template.node.wait_time_for_blocks = 1_000; + conf_template.burnchain.pox_2_activation = Some(v1_unlock_height); + + conf_template.node.require_affirmed_anchor_blocks = false; + + // make epoch 2.1 and 2.2 start in the middle of boot-up + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = 101; + epochs[2].start_height = 101; + epochs[2].end_height = 151; + epochs[3].start_height = 151; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = STACKS_EPOCH_MAX; + epochs.truncate(5); + conf_template.burnchain.epochs = Some(epochs); + + let privks: Vec<_> = (0..5) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + + let stack_privks: Vec<_> = (0..5) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + + let balances: Vec<_> = privks + .iter() + .map(|privk| { + let addr = to_addr(privk); + InitialBalance { + address: addr.into(), + amount: 30_000_000, + } + }) + .collect(); + + let stack_balances: Vec<_> = stack_privks + .iter() + .map(|privk| { + let addr = to_addr(privk); + InitialBalance { + address: addr.into(), + amount: 2_000_000_000_000_000, + } + }) + .collect(); + + let mut confs = vec![]; + let mut burnchain_configs = 
vec![]; + let mut blocks_processed = vec![]; + let mut channels = vec![]; + let mut miner_status = vec![]; + + for i in 0..num_miners { + let seed = StacksPrivateKey::new().to_bytes(); + let (mut conf, _) = neon_integration_test_conf_with_seed(seed); + + conf.initial_balances.clear(); + conf.initial_balances.append(&mut balances.clone()); + conf.initial_balances.append(&mut stack_balances.clone()); + + conf.node.mine_microblocks = conf_template.node.mine_microblocks; + conf.miner.microblock_attempt_time_ms = conf_template.miner.microblock_attempt_time_ms; + conf.node.wait_time_for_microblocks = conf_template.node.wait_time_for_microblocks; + conf.node.microblock_frequency = conf_template.node.microblock_frequency; + conf.miner.first_attempt_time_ms = conf_template.miner.first_attempt_time_ms; + conf.miner.subsequent_attempt_time_ms = conf_template.miner.subsequent_attempt_time_ms; + conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; + conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; + conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.node.require_affirmed_anchor_blocks = + conf_template.node.require_affirmed_anchor_blocks; + + // multiple nodes so they must download from each other + conf.miner.wait_for_block_download = true; + + // nodes will selectively hide blocks from one another + conf.node.fault_injection_hide_blocks = true; + + let rpc_port = 41063 + 10 * i; + let p2p_port = 41063 + 10 * i + 1; + conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); + conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); + conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + + confs.push(conf); + } + + let node_privkey_1 = + StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); + for i in 1..num_miners { + let chain_id = confs[0].burnchain.chain_id; + let peer_version = 
confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + + confs[i].node.set_bootstrap_nodes( + format!( + "{}@{}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex(), + p2p_bind + ), + chain_id, + peer_version, + ); + } + + // use short reward cycles + for i in 0..num_miners { + let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + (1600 * reward_cycle_len - 1).into(), + (1700 * reward_cycle_len).into(), + v1_unlock_height, + v2_unlock_height.try_into().unwrap(), + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + burnchain_configs.push(burnchain_config); + } + + let mut btcd_controller = BitcoinCoreController::new(confs[0].clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + confs[0].clone(), + None, + Some(burnchain_configs[0].clone()), + None, + ); + + btc_regtest_controller.bootstrap_chain(1); + + // make sure all miners have BTC + for i in 1..num_miners { + let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); + btc_regtest_controller + .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + btc_regtest_controller.bootstrap_chain(1); + btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); + } + + btc_regtest_controller.bootstrap_chain((199 - num_miners) as u64); + + eprintln!("Chain bootstrapped..."); + + for (i, burnchain_config) in burnchain_configs.into_iter().enumerate() { + let mut run_loop = neon::RunLoop::new(confs[i].clone()); + let blocks_processed_arc = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); + let this_miner_status = run_loop.get_miner_status(); + + blocks_processed.push(blocks_processed_arc); + 
channels.push(channel); + miner_status.push(this_miner_status); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + } + + let http_origin = format!("http://{}", &confs[0].node.rpc_bind); + + // give the run loops some time to start up! + for i in 0..num_miners { + wait_for_runloop(&blocks_processed[i as usize]); + } + + // activate miners + eprintln!("\n\nBoot miner 0\n\n"); + loop { + let tip_info_opt = get_chain_info_opt(&confs[0]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner 0...\n\n"); + } + next_block_and_iterate( + &mut btc_regtest_controller, + &blocks_processed[0], + block_time_ms, + ); + } + + for i in 1..num_miners { + eprintln!("\n\nBoot miner {}\n\n", i); + loop { + let tip_info_opt = get_chain_info_opt(&confs[i]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner {}...\n\n", i); + } + next_block_and_iterate( + &mut btc_regtest_controller, + &blocks_processed[i as usize], + 5_000, + ); + } + } + + eprintln!("\n\nBegin transactions\n\n"); + + let pox_pubkey = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey) + .to_bytes() + .to_vec(), + ); + + let sort_height = channels[0].get_sortitions_processed(); + + // make everyone stack + let stacking_txs: Vec<_> = stack_privks + .iter() + .enumerate() + .map(|(_i, pk)| { + make_contract_call( + pk, + 0, + 1360, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(2_000_000_000_000_000 - 30_000_000), + execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + 
ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt((sort_height + 1) as u128), + Value::UInt(12), + ], + ) + }) + .collect(); + + // keeps the mempool full, and makes it so miners will spend a nontrivial amount of time + // building blocks + let all_txs: Vec<_> = privks + .iter() + .enumerate() + .map(|(i, pk)| make_random_tx_chain(pk, (25 * i) as u64, false)) + .collect(); + + // everyone locks up + let mut cnt = 0; + for tx in stacking_txs { + eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + submit_tx(&http_origin, &tx); + cnt += 1; + } + + // run a reward cycle + let mut at_220 = false; + while !at_220 { + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + if tip_info.burn_block_height == 220 { + at_220 = true; + } + } + } + + // blast out the rest + let mut cnt = 0; + for tx_chain in all_txs { + for tx in tx_chain { + eprintln!("\n\nSubmit tx {}\n\n", &cnt); + submit_tx(&http_origin, &tx); + cnt += 1; + } + } + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + assert!(tip_info.burn_block_height <= 220); + } + + eprintln!("\n\nBegin mining\n\n"); + + info!("####################### end of cycle ##############################"); + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + info!("####################### end of cycle ##############################"); + + // prevent Stacks at these heights from propagating + env::set_var( + "STACKS_HIDE_BLOCKS_AT_HEIGHT", + "[226,227,228,229,230,236,237,238,239,240,246,247,248,249,250,256,257,258,259,260,266,267,268,269,270,276,277,278,279,280,286,287,288,289,290]" + ); + + // miner 0 mines a prepare phase and confirms a hidden anchor block. 
+ // miner 1 is disabled for these prepare phases + for i in 0..10 { + eprintln!("\n\nBuild block {}\n\n", i); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + if i >= reward_cycle_len - prepare_phase_len - 2 { + signal_mining_blocked(miner_status[1].clone()); + } + } + signal_mining_ready(miner_status[1].clone()); + + info!("####################### end of cycle ##############################"); + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + info!("####################### end of cycle ##############################"); + + // miner 1 mines a prepare phase and confirms a hidden anchor block. + // miner 0 is disabled for this prepare phase + for i in 0..10 { + eprintln!("\n\nBuild block {}\n\n", i); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + if i >= reward_cycle_len - prepare_phase_len - 2 { + signal_mining_blocked(miner_status[0].clone()); + } + } + signal_mining_ready(miner_status[0].clone()); + + info!("####################### end of cycle ##############################"); + let mut max_stacks_tip = 0; + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + + // miner 1's history overtakes miner 0's. + // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle + // 23 and affirmed cycle 22's anchor block's absence. 
+ max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); + } + info!("####################### end of cycle ##############################"); + + // advance to start of next reward cycle + eprintln!("\n\nBuild final block\n\n"); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Tip for miner {}: {:?}", i, &tip_info); + } + + // resume block propagation + env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); + + // wait for all blocks to propagate + eprintln!( + "Wait for all blocks to propagate; stacks tip height is {}", + max_stacks_tip + ); + wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); + + // nodes now agree on stacks affirmation map + for (i, c) in confs.iter().enumerate() { + let tip_info = get_chain_info(&c); + info!("Final tip for miner {}: {:?}", i, &tip_info); + } +} diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs new file mode 100644 index 0000000000..58313947d8 --- /dev/null +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -0,0 +1,631 @@ +// Copyright (C) 2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::HashMap; +use std::env; +use std::thread; + +use stacks::burnchains::Burnchain; +use stacks::core::STACKS_EPOCH_MAX; +use stacks::vm::types::QualifiedContractIdentifier; + +use crate::config::EventKeyType; +use crate::config::EventObserverConfig; +use crate::config::InitialBalance; +use crate::neon; +use crate::tests::bitcoin_regtest::BitcoinCoreController; +use crate::tests::neon_integrations::*; +use crate::tests::*; +use crate::BitcoinRegtestController; +use crate::BurnchainController; +use stacks::core; + +use stacks::burnchains::PoxConstants; + +use clarity::vm::types::PrincipalData; + +#[test] +#[ignore] +/// Test the trait invocation behavior for contracts instantiated in epoch 2.05 +/// * in epoch 2.1: the trait invocation works +/// * in epoch 2.2: trait invocation is broken, and returns a runtime error, even when wrapped +/// * in epoch 2.3: the trait invocation works +fn trait_invocation_behavior() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 235; + let epoch_2_3 = 241; + + let spender_sk = StacksPrivateKey::new(); + let contract_addr = to_addr(&spender_sk); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let impl_contract_id = + QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + + let mut spender_nonce = 0; + let fee_amount = 10_000; + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1_000_000, + }); + + let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; + let impl_contract = + "(impl-trait .simple-trait.simple-method) (define-read-only (foo (x uint)) (ok x))"; + let use_contract = "(use-trait simple .simple-trait.simple-method) + (define-public (call-simple (s )) (contract-call? 
s foo u0))"; + let invoke_contract = " + (use-trait simple .simple-trait.simple-method) + (define-public (invocation-1) + (contract-call? .use-simple call-simple .impl-simple)) + (define-public (invocation-2 (st )) + (contract-call? .use-simple call-simple st)) + "; + + let wrapper_contract = " + (use-trait simple .simple-trait.simple-method) + (define-public (invocation-1) + (contract-call? .invoke-simple invocation-1)) + (define-public (invocation-2 (st )) + (contract-call? .invoke-simple invocation-2 st)) + "; + + let (mut conf, _) = neon_integration_test_conf(); + + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = STACKS_EPOCH_MAX; + epochs.truncate(6); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + 
u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // publish contracts right away! 
+ let publish_trait = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "simple-trait", + trait_contract, + ); + + spender_nonce += 1; + + let publish_impl = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "impl-simple", + impl_contract, + ); + + spender_nonce += 1; + + let publish_use = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "use-simple", + use_contract, + ); + + spender_nonce += 1; + + let publish_invoke = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "invoke-simple", + invoke_contract, + ); + + spender_nonce += 1; + + info!("Submit 2.05 txs"); + submit_tx(&http_origin, &publish_trait); + submit_tx(&http_origin, &publish_impl); + submit_tx(&http_origin, &publish_use); + submit_tx(&http_origin, &publish_invoke); + + info!( + "At height = {}, epoch-2.1 = {}", + get_chain_info(&conf).burn_block_height, + epoch_2_1 + ); + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_205_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_205_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_1 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_1 - 1, so these are the last transactions processed pre-2.1. 
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_21_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_21_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines those transactions into epoch 2.1 + // mine until just before epoch 2.2 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_2 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_21_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_21_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_2 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_2 - 1, so these are the last transactions processed pre-2.2. 
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let publish_wrap = make_contract_publish( + &spender_sk, + spender_nonce, + fee_amount, + "wrap-simple", + wrapper_contract, + ); + + spender_nonce += 1; + submit_tx(&http_origin, &publish_wrap); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. + let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_bad_22_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_bad_22_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines those transactions into epoch 2.2 + // mine until just before epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 - 3 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // submit invocation txs in epoch 2.2. 
+ let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_bad_22_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_bad_22_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // this mines bitcoin block epoch_2_3 - 2, and causes the the + // stacks node to mine the stacks block which will be included in + // epoch_2_3 - 1, so these are the last transactions processed pre-2.3. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let tx_3 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-1", + &[], + ); + let expected_good_23_3_nonce = spender_nonce; + spender_nonce += 1; + + let tx_4 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "wrap-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_23_4_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_3); + submit_tx(&http_origin, &tx_4); + + // advance to epoch_2_3 before submitting the next transactions, + // so that they can pass the mempool. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // submit invocation txs. 
+ let tx_1 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-1", + &[], + ); + let expected_good_23_1_nonce = spender_nonce; + spender_nonce += 1; + + let tx_2 = make_contract_call( + &spender_sk, + spender_nonce, + fee_amount, + &contract_addr, + "invoke-simple", + "invocation-2", + &[Value::Principal(impl_contract_id.clone().into())], + ); + let expected_good_23_2_nonce = spender_nonce; + spender_nonce += 1; + + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Total spender txs = {}", spender_nonce); + + let blocks = test_observer::get_blocks(); + + let mut transaction_receipts = Vec::new(); + + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + // only interested in contract calls + _ => continue, + }; + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + + transaction_receipts.push(( + parsed.auth.get_origin_nonce(), + (contract_call.clone(), result), + )); + } + } + } + + transaction_receipts.sort_by_key(|x| x.0); + + let transaction_receipts: HashMap<_, _> = transaction_receipts.into_iter().collect(); + + for tx_nonce in [ + expected_good_205_1_nonce, + expected_good_21_1_nonce, + expected_good_21_3_nonce, + expected_good_23_1_nonce, + ] { 
+ assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "invoke-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [ + expected_good_205_2_nonce, + expected_good_21_2_nonce, + expected_good_21_4_nonce, + expected_good_23_2_nonce, + ] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "invoke-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_good_23_3_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_good_23_4_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); + } + + for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(err none)"); + } + + for tx_nonce in [expected_bad_22_2_nonce, expected_bad_22_4_nonce] { + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(err none)"); 
+    }
+
+    for (key, value) in transaction_receipts.iter() {
+        eprintln!("{} => {} of {}", key, value.0, value.1);
+    }
+
+    test_observer::clear();
+    channel.stop_chains_coordinator();
+}
diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs
new file mode 100644
index 0000000000..854031eebc
--- /dev/null
+++ b/testnet/stacks-node/src/tests/epoch_24.rs
@@ -0,0 +1,1350 @@
+// Copyright (C) 2023 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use crate::config::{EventKeyType, EventObserverConfig, InitialBalance};
+use crate::tests::neon_integrations::{
+    get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait,
+    submit_tx, test_observer, wait_for_runloop,
+};
+use crate::tests::{make_contract_call, to_addr};
+use clarity::boot_util::boot_code_id;
+use clarity::vm::types::PrincipalData;
+use clarity::vm::{ClarityVersion, Value};
+use stacks::burnchains::{Burnchain, PoxConstants};
+use stacks::chainstate::burn::db::sortdb::SortitionDB;
+use stacks::chainstate::stacks::address::PoxAddress;
+use stacks::chainstate::stacks::boot::RawRewardSetEntry;
+use stacks::chainstate::stacks::db::StacksChainState;
+use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload};
+use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey};
+use stacks_common::util::hash::{bytes_to_hex, hex_bytes, Hash160};
+use stacks_common::util::secp256k1::Secp256k1PublicKey;
+use std::collections::HashMap;
+use std::{env, thread};
+
+use crate::tests::bitcoin_regtest::BitcoinCoreController;
+use crate::{neon, BitcoinRegtestController, BurnchainController};
+use stacks::clarity_cli::vm_execute as execute;
+use stacks::core;
+use stacks::core::{
+    StacksEpoch, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4,
+};
+use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG};
+use stacks_common::codec::StacksMessageCodec;
+use stacks_common::consts::STACKS_EPOCH_MAX;
+use stacks_common::types::{Address, StacksEpochId};
+use stacks_common::util::sleep_ms;
+
+#[cfg(test)]
+pub fn get_reward_set_entries_at_block(
+    state: &mut StacksChainState,
+    burnchain: &Burnchain,
+    sortdb: &SortitionDB,
+    block_id: &StacksBlockId,
+    burn_block_height: u64,
+) -> Result<Vec<RawRewardSetEntry>, Error> {
+    state
+        .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id)
+        .and_then(|mut addrs| {
+            addrs.sort_by_key(|k|
k.reward_address.bytes()); + Ok(addrs) + }) +} + +#[test] +#[ignore] +/// Verify the buggy stacks-increase behavior that was possible in PoX-2 does not crash the +/// node in Epoch 2.4 +/// +/// Verify that the transition to Epoch 2.4 occurs smoothly even if miners do not mine in the +/// same block as the PoX-3 activation height. +/// +/// Verify the PoX-3 payouts get made to the expected recipients. +fn fix_to_pox_contract() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. + let epoch_2_3 = 265; + let epoch_2_4 = 280; + let pox_3_activation_height = epoch_2_4; + + let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: stacked + increase_by + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_3 = 
Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = epoch_2_4; + epochs[6].start_height = epoch_2_4; + epochs[6].end_height = STACKS_EPOCH_MAX; + epochs.truncate(7); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + pox_3_activation_height as u32, + ); + 
burnchain_config.pox_constants = pox_constants.clone(); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked.into()), + 
pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..20 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase again, in 
Epoch-2.2, it should + // runtime abort + let aborted_increase_nonce_2_2 = 2; + let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce_2_2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // transition to epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 + 1 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // invoke stack-increase again, in Epoch-2.3, it should + // runtime abort + let aborted_increase_nonce_2_3 = 3; + let tx = make_contract_call( + &spender_sk, + aborted_increase_nonce_2_3, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-increase", + &[Value::UInt(5000)], + ); + + info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // transition to 2 blocks before epoch 2.4 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_4 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_4); + + // *now* advance to 2.4 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.4"); + + // now, try stacking in pox-3 + let sort_height = channel.get_sortitions_processed(); + let tx = make_contract_call( + &spender_sk, + 4, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + 
&[ + Value::UInt(stacked.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.4 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 5, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-increase", + &[Value::UInt(increase_by.into())], + ); + + info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..19 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); 
+ + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + debug!("Test burnchain height {}", height); + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + 
Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot + ( + 24, + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 25, + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // Epoch 2.2 has started, so the reward set should be all burns. + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + // Epoch 2.3 has started, so the reward set should be all burns. 
+ (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + (28, HashMap::from([(burn_pox_addr.clone(), 14)])), + // cycle 29 is the first 2.4 cycle, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet) + ( + 29, + HashMap::from([ + (pox_addr_2.clone(), 6u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 2), + ]), + ), + // stack-increase has been invoked, but this should not skew reward set heavily + // because pox-3 fixes the total-locked bug + ( + 30, + HashMap::from([ + (pox_addr_2.clone(), 7u64), + (pox_addr_3.clone(), 6), + (burn_pox_addr.clone(), 1), + ]), + ), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + let mut abort_tested_2_2 = false; + let mut abort_tested_2_3 = false; + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); + if &tx_sender == &spender_addr + && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 + || 
parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) + { + let contract_call = match &parsed.payload { + TransactionPayload::ContractCall(cc) => cc, + _ => panic!("Expected aborted_increase_nonce to be a contract call"), + }; + assert_eq!(contract_call.contract_name.as_str(), "pox-2"); + assert_eq!(contract_call.function_name.as_str(), "stack-increase"); + let result = Value::try_deserialize_hex_untyped( + tx.get("raw_result").unwrap().as_str().unwrap(), + ) + .unwrap(); + assert_eq!(result.to_string(), "(err none)"); + if parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 { + abort_tested_2_2 = true; + } else if parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3 { + abort_tested_2_3 = true; + } else { + panic!("Unexpected nonce for the aborted stack-increase transaction.") + } + } + } + } + + assert!( + abort_tested_2_2, + "The stack-increase transaction must have been aborted in Epoch 2.2, \ + and it must have been tested in the tx receipts" + ); + assert!( + abort_tested_2_3, + "The stack-increase transaction must have been aborted in Epoch 2.3, \ + and it must have been tested in the tx receipts" + ); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + +#[test] +#[ignore] +/// Verify that stackers that don't meet the stacking threshold get auto-unlocked in PoX-3. +fn verify_auto_unlock_behavior() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let reward_cycle_len = 10; + let prepare_phase_len = 3; + let epoch_2_05 = 215; + let epoch_2_1 = 230; + let v1_unlock_height = 231; + let epoch_2_2 = 255; // two blocks before next prepare phase. 
+ let epoch_2_3 = 265; + let epoch_2_4 = 280; + let pox_3_activation_height = epoch_2_4; + + let first_stacked_init = 200_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let first_stacked_incr = 40_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + let small_stacked = 17_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + let spender_sk = StacksPrivateKey::new(); + let spender_stx_addr: StacksAddress = to_addr(&spender_sk); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + + let spender_2_sk = StacksPrivateKey::new(); + let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); + + let mut initial_balances = vec![]; + + initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: first_stacked_init + first_stacked_incr + 100_000, + }); + + initial_balances.push(InitialBalance { + address: spender_2_addr.clone(), + amount: small_stacked + 100_000, + }); + + let pox_pubkey_1 = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash_1 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_1) + .to_bytes() + .to_vec(), + ); + + let pox_pubkey_2 = Secp256k1PublicKey::from_hex( + "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", + ) + .unwrap(); + let pox_pubkey_hash_2 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_2) + .to_bytes() + .to_vec(), + ); + let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![pox_pubkey_2], + ) + .unwrap(); + + let pox_pubkey_3 = Secp256k1PublicKey::from_hex( + "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", + ) + .unwrap(); + let pox_pubkey_hash_3 = bytes_to_hex( + &Hash160::from_node_public_key(&pox_pubkey_3) + .to_bytes() + .to_vec(), + ); + let pox_pubkey_3_stx_addr = 
StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![pox_pubkey_3], + ) + .unwrap(); + + let (mut conf, _) = neon_integration_test_conf(); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.wait_for_block_download = false; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + conf.initial_balances.append(&mut initial_balances); + + let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); + epochs[1].end_height = epoch_2_05; + epochs[2].start_height = epoch_2_05; + epochs[2].end_height = epoch_2_1; + epochs[3].start_height = epoch_2_1; + epochs[3].end_height = epoch_2_2; + epochs[4].start_height = epoch_2_2; + epochs[4].end_height = epoch_2_3; + epochs[5].start_height = epoch_2_3; + epochs[5].end_height = epoch_2_4; + epochs[6].start_height = epoch_2_4; + epochs[6].end_height = STACKS_EPOCH_MAX; + epochs.truncate(7); + conf.burnchain.epochs = Some(epochs); + + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + u64::max_value() - 2, + u64::max_value() - 1, + v1_unlock_height as u32, + epoch_2_2 as u32 + 1, + pox_3_activation_height as u32, + ); + burnchain_config.pox_constants = pox_constants.clone(); + + let first_v3_cycle = burnchain_config 
+ .block_height_to_reward_cycle(burnchain_config.pox_constants.pox_3_activation_height as u64) + .unwrap() + + 1; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let runloop_burnchain = burnchain_config.clone(); + + let blocks_processed = run_loop.get_blocks_processed_arc(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // push us to block 205 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // stack right away + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_1 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let pox_addr_tuple_3 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + + let tx = make_contract_call( + &spender_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + 
"stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_1.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.05 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // wait until just before epoch 2.1 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_1 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_1); + assert!(sort_height > v1_unlock_height); + + // *now* advance to 2.1 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.1"); + + let sort_height = channel.get_sortitions_processed(); + let pox_addr_tuple_2 = execute( + &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + ClarityVersion::Clarity2, + ) + .unwrap() + .unwrap(); + let tx = make_contract_call( + &spender_sk, + 1, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-2", + "stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.1 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..20 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + 
panic!("FATAL: failed to mine"); + } + } + + info!("Successfully transitioned to Epoch 2.2"); + + // transition to epoch 2.3 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_3 + 1 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let pox_info = get_pox_info(&http_origin); + info!( + "curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, + pox_info.current_cycle.id, + pox_info.current_cycle.is_pox_active + ); + } + + info!("Successfully transitioned to Epoch 2.3"); + + // transition to 2 blocks before epoch 2.4 + loop { + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height >= epoch_2_4 - 2 { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let pox_info = get_pox_info(&http_origin); + info!( + "curr height: {}, curr cycle id: {}, pox active: {}", + tip_info.burn_block_height, + pox_info.current_cycle.id, + pox_info.current_cycle.is_pox_active + ); + } + + // skip a couple sortitions + btc_regtest_controller.bootstrap_chain(4); + sleep_ms(5000); + + let sort_height = channel.get_sortitions_processed(); + assert!(sort_height > epoch_2_4); + + // *now* advance to 2.4 + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + info!("Test passed processing 2.4"); + + // now, try stacking in pox-3 + let sort_height = channel.get_sortitions_processed(); + let tx = make_contract_call( + &spender_sk, + 2, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(first_stacked_init.into()), + pox_addr_tuple_2.clone(), + Value::UInt(sort_height as u128), + Value::UInt(12), + ], + ); + + info!("Submit 2.4 stacking tx to {:?}", &http_origin); + sleep_ms(5_000); + submit_tx(&http_origin, &tx); 
+ + let tx = make_contract_call( + &spender_2_sk, + 0, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-stx", + &[ + Value::UInt(small_stacked.into()), + pox_addr_tuple_3.clone(), + Value::UInt(sort_height as u128), + Value::UInt(10), + ], + ); + + info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + // that it can mine _at all_ is a success criterion + let mut last_block_height = get_chain_info(&conf).burn_block_height; + for _i in 0..5 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // Check the locked balance of addr 1. + let account = get_account(&http_origin, &spender_stx_addr); + assert_eq!(account.locked, first_stacked_init as u128); + + // Check the locked balance of addr 2. + let account = get_account(&http_origin, &spender_2_stx_addr); + assert_eq!(account.locked, small_stacked as u128); + + // Check that the "raw" reward sets for all cycles just contains entries for both addrs + // for the next few cycles. 
+ for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + let tip_info = get_chain_info(&conf); + let tip_block_id = + StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let reward_set_entries = get_reward_set_entries_at_block( + &mut chainstate, + &burnchain_config, + sortdb, + &tip_block_id, + tip_info.burn_block_height, + ) + .unwrap(); + + assert_eq!(reward_set_entries.len(), 2); + info!("reward set entries: {:?}", reward_set_entries); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + pox_pubkey_2_stx_addr.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + first_stacked_init as u128 + ); + assert_eq!( + reward_set_entries[1].reward_address.bytes(), + pox_pubkey_3_stx_addr.bytes.0.to_vec() + ); + assert_eq!(reward_set_entries[1].amount_stacked, small_stacked as u128); + } + + // invoke stack-increase + let tx = make_contract_call( + &spender_sk, + 3, + 3000, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox-3", + "stack-increase", + &[Value::UInt(first_stacked_incr.into())], + ); + + info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + submit_tx(&http_origin, &tx); + + for _i in 0..19 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tip_info = get_chain_info(&conf); + if tip_info.burn_block_height > last_block_height { + last_block_height = tip_info.burn_block_height; + } else { + panic!("FATAL: failed to mine"); + } + } + + // Check that the locked balance of addr 1 has not changed. + let account = get_account(&http_origin, &spender_stx_addr); + assert_eq!( + account.locked, + (first_stacked_init + first_stacked_incr) as u128 + ); + + // Check that addr 2 has no locked tokens at this height (was auto-unlocked). 
+ let account = get_account(&http_origin, &spender_2_stx_addr); + assert_eq!(account.locked, 0); + + let (mut chainstate, _) = StacksChainState::open( + false, + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + let sortdb = btc_regtest_controller.sortdb_mut(); + + // Check that the "raw" reward sets for all cycles just contains entries for the first + // address at the cycle start, since addr 2 was auto-unlocked. + for cycle_number in first_v3_cycle..(first_v3_cycle + 6) { + let tip_info = get_chain_info(&conf); + let tip_block_id = + StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let reward_set_entries = get_reward_set_entries_at_block( + &mut chainstate, + &burnchain_config, + sortdb, + &tip_block_id, + tip_info.burn_block_height, + ) + .unwrap(); + + assert_eq!(reward_set_entries.len(), 1); + assert_eq!( + reward_set_entries[0].reward_address.bytes(), + pox_pubkey_2_stx_addr.bytes.0.to_vec() + ); + assert_eq!( + reward_set_entries[0].amount_stacked, + (first_stacked_init + first_stacked_incr) as u128 + ); + } + + let tip_info = get_chain_info(&conf); + let tip = StacksBlockId::new(&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip); + + let mut reward_cycle_pox_addrs = HashMap::new(); + + info!("Last tip height = {}", tip_info.burn_block_height); + + for height in 211..tip_info.burn_block_height { + let reward_cycle = pox_constants + .block_height_to_reward_cycle(burnchain_config.first_block_height, height) + .unwrap(); + + if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { + reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); + } + + let iconn = sortdb.index_conn(); + let pox_addrs = chainstate + .clarity_eval_read_only( + &iconn, + &tip, + &boot_code_id("pox-2", false), + &format!("(get-burn-block-info? 
pox-addrs u{})", height), + ) + .expect_optional() + .unwrap() + .expect_tuple() + .get_owned("addrs") + .unwrap() + .expect_list(); + + if !burnchain_config.is_in_prepare_phase(height) { + if pox_addrs.len() > 0 { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_0, 1); + } + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .insert(pox_addr_1, 1); + } + } + } + } + + let reward_cycle_min = *reward_cycle_pox_addrs.keys().min().unwrap(); + let reward_cycle_max = *reward_cycle_pox_addrs.keys().max().unwrap(); + + let pox_addr_1 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_1).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_2 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_2).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let pox_addr_3 = PoxAddress::Standard( + StacksAddress::new(26, Hash160::from_hex(&pox_pubkey_hash_3).unwrap()), + Some(AddressHashMode::SerializeP2PKH), + ); + let burn_pox_addr = PoxAddress::Standard( + StacksAddress::new( + 26, + Hash160::from_hex("0000000000000000000000000000000000000000").unwrap(), + ), + Some(AddressHashMode::SerializeP2PKH), + ); + + let expected_slots = HashMap::from([ + ( + 21u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 22u64, + HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 23u64, + 
HashMap::from([(pox_addr_1.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // cycle 24 is the first 2.1, it should have pox_2 and 1 burn slot + ( + 24, + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ( + 25, + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + // Epoch 2.2 has started, so the reward set should be all burns. + (26, HashMap::from([(burn_pox_addr.clone(), 14)])), + // Epoch 2.3 has started, so the reward set should be all burns. + (27, HashMap::from([(burn_pox_addr.clone(), 14)])), + (28, HashMap::from([(burn_pox_addr.clone(), 14)])), + // cycle 29 is the first 2.4 cycle, it should have pox_2 and pox_3 with equal + // slots (because increase hasn't gone into effect yet). + ( + 29, + HashMap::from([ + (pox_addr_2.clone(), 12u64), + (pox_addr_3.clone(), 1), + (burn_pox_addr.clone(), 1), + ]), + ), + // stack-increase has been invoked, which causes spender_addr_2 to be below the stacking + // minimum, and thus they have zero reward addresses in reward cycle 30. 
+ ( + 30, + HashMap::from([(pox_addr_2.clone(), 13u64), (burn_pox_addr.clone(), 1)]), + ), + ]); + + for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { + let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + for (pox_addr, slots) in cycle_counts.iter() { + assert_eq!( + *slots, + expected_slots[&reward_cycle][&pox_addr], + "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", + &pox_addr, + reward_cycle, + ); + info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); + } + } + + test_observer::clear(); + channel.stop_chains_coordinator(); +} diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 4f07c9f9d3..3f3cf1a385 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -31,7 +31,6 @@ use stacks::vm::{ contract_interface_builder::{build_contract_interface, ContractInterface}, mem_type_check, }, - database::ClaritySerializable, types::{QualifiedContractIdentifier, ResponseData, TupleData}, Value, }; @@ -204,6 +203,7 @@ fn integration_test_get_info() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let principal_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -220,6 +220,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -233,6 +234,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -246,6 +248,7 @@ fn 
integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -280,6 +283,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -303,6 +307,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx, @@ -324,6 +329,7 @@ fn integration_test_get_info() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx_xfer, @@ -484,7 +490,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); @@ -499,7 +505,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); @@ -516,7 +522,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); @@ -537,7 +543,7 @@ fn integration_test_get_info() { eprintln!("Test: POST {}", path); let res = client.post(&path) - .json(&key.serialize()) + .json(&key.serialize_to_hex()) .send() .unwrap().json::>().unwrap(); @@ -665,7 +671,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(3).serialize()] + arguments: vec![Value::UInt(3).serialize_to_hex()] }; let res = client.post(&path) @@ -733,7 +739,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: 
"'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(3).serialize()] + arguments: vec![Value::UInt(3).serialize_to_hex()] }; let res = client.post(&path) @@ -756,7 +762,7 @@ fn integration_test_get_info() { let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), sponsor: None, - arguments: vec![Value::UInt(100).serialize()] + arguments: vec![Value::UInt(100).serialize_to_hex()] }; let res = client.post(&path) @@ -1094,6 +1100,7 @@ fn contract_stx_transfer() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -1116,6 +1123,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1131,6 +1139,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -1151,6 +1160,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1172,6 +1182,7 @@ fn contract_stx_transfer() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, tx, @@ -1196,6 +1207,7 @@ fn contract_stx_transfer() { .mem_pool .submit( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, &xfer_to_contract, @@ -1214,6 +1226,7 @@ fn contract_stx_transfer() { .mem_pool .submit( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, &xfer_to_contract, @@ -1412,6 +1425,7 @@ fn mine_transactions_out_of_order() { .callbacks .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let sk = 
StacksPrivateKey::from_hex(SK_3).unwrap(); let header_hash = chain_tip.block.block_hash(); @@ -1432,6 +1446,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1446,6 +1461,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, @@ -1460,6 +1476,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1474,6 +1491,7 @@ fn mine_transactions_out_of_order() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, xfer_to_contract, @@ -1563,6 +1581,7 @@ fn mine_contract_twice() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); if round == 1 { @@ -1577,6 +1596,7 @@ fn mine_contract_twice() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1648,6 +1668,7 @@ fn bad_contract_tx_rollback() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -1673,6 +1694,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1691,6 +1713,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1705,6 +1728,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, xfer_to_contract, @@ -1719,6 
+1743,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1733,6 +1758,7 @@ fn bad_contract_tx_rollback() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -1959,6 +1985,7 @@ fn block_limit_runtime_test() { .callbacks .on_new_tenure(|round, _burnchain_tip, _chain_tip, tenure| { let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( @@ -1984,6 +2011,7 @@ fn block_limit_runtime_test() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, publish_tx, @@ -2013,6 +2041,7 @@ fn block_limit_runtime_test() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, consensus_hash, block_hash, tx, @@ -2086,6 +2115,7 @@ fn mempool_errors() { .on_new_tenure(|round, _burnchain_tip, chain_tip, tenure| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; @@ -2099,6 +2129,7 @@ fn mempool_errors() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx, diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 2ee80b3bed..d45a3c01cf 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -19,6 +19,7 @@ use stacks::net::Error as NetError; use stacks::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks::util::{hash::*, secp256k1::*}; use stacks::vm::costs::ExecutionCost; +use stacks::vm::database::NULL_BURN_STATE_DB; use stacks::vm::{ representations::ContractName, types::PrincipalData, 
types::QualifiedContractIdentifier, types::StandardPrincipalData, Value, @@ -108,6 +109,7 @@ fn mempool_setup_chainstate() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; + let sortdb = tenure.open_fake_sortdb(); if round == 1 { eprintln!("Tenure in 1 started!"); @@ -118,6 +120,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx1, @@ -132,6 +135,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx2, @@ -151,6 +155,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx3, @@ -170,6 +175,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx4, @@ -189,6 +195,7 @@ fn mempool_setup_chainstate() { .mem_pool .submit_raw( &mut chainstate_copy, + &sortdb, &consensus_hash, &header_hash, publish_tx4, @@ -230,7 +237,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let tx_bytes = make_contract_call( @@ -245,14 +258,26 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let tx_bytes = make_stacks_transfer(&contract_sk, 5, 200, &other_addr, 1000); let tx = 
StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); // bad signature @@ -260,7 +285,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -296,7 +327,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadAddressVersionByte = e { @@ -319,7 +356,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadAddressVersionByte = e { true @@ -332,7 +375,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); 
assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { @@ -346,7 +395,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadNonces(_) = e { @@ -360,7 +415,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { @@ -375,7 +436,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { @@ -392,7 +459,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadAddressVersionByte = e { @@ -419,7 +492,13 @@ fn mempool_setup_chainstate() { let tx = 
StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadTransactionVersion = e { @@ -433,7 +512,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { @@ -447,7 +532,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { @@ -460,7 +551,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e { @@ -481,7 +578,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - 
.will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoSuchContract = e { @@ -502,7 +605,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoSuchPublicFunction = e { @@ -523,7 +632,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { @@ -537,7 +652,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e { @@ -566,7 +687,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + 
&tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -597,7 +724,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::InvalidMicroblocks = e { @@ -629,7 +762,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!( @@ -644,7 +783,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); eprintln!("Err: {:?}", e); assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e { @@ -696,7 +841,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let contract_id = QualifiedContractIdentifier::new( @@ -717,7 +868,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); chain_state - 
.will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap(); let contract_id = QualifiedContractIdentifier::new( @@ -738,7 +895,13 @@ fn mempool_setup_chainstate() { let tx = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let e = chain_state - .will_admit_mempool_tx(consensus_hash, block_hash, &tx, tx_bytes.len() as u64) + .will_admit_mempool_tx( + &NULL_BURN_STATE_DB, + consensus_hash, + block_hash, + &tx, + tx_bytes.len() as u64, + ) .unwrap_err(); assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { true diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index e1cd12ee51..f85970104b 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -41,6 +41,9 @@ mod atlas; mod bitcoin_regtest; mod epoch_205; mod epoch_21; +mod epoch_22; +mod epoch_23; +mod epoch_24; mod integrations; mod mempool; pub mod neon_integrations; @@ -528,11 +531,12 @@ fn should_succeed_mining_valid_txs() { let consensus_hash = chain_tip.metadata.consensus_hash; let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); match round { 1 => { // On round 1, publish the KV contract - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash, PUBLISH_CONTRACT.to_owned(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -541,7 +545,7 @@ fn should_succeed_mining_valid_txs() { // On round 2, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = 
"8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -550,7 +554,7 @@ fn should_succeed_mining_valid_txs() { // On round 3, publish a "set:foo=bar" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 2 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000002000000000000000a010142a01caf6a32b367664869182f0ebc174122a5a980937ba259d44cc3ebd280e769a53dd3913c8006ead680a6e1c98099fcd509ce94b0a4e90d9f4603b101922d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -559,7 +563,7 @@ fn should_succeed_mining_valid_txs() { // On round 4, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 3 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = 
"8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000003000000000000000a010046c2c1c345231443fef9a1f64fccfef3e1deacc342b2ab5f97612bb3742aa799038b20aea456789aca6b883e52f84a31adfee0bc2079b740464877af8f2f87d2030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -568,7 +572,7 @@ fn should_succeed_mining_valid_txs() { // On round 5, publish a stacks transaction // ./blockstack-cli --testnet token-transfer b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01 10 0 ST195Q2HPXY576N4CT2A0R94D7DRYSX54A5X3YZTH 1000 let transfer_1000_stx = "80800000000400b71a091b4b8b7661a661c620966ab6573bc2dcd30000000000000000000000000000000a0000393810832bacd44cfc4024980876135de6b95429bdb610d5ce96a92c9ee9bfd81ec77ea0f1748c8515fc9a1589e51d8b92bf028e3e84ade1249682c05271d5b803020000000000051a525b8a36ef8a73548cd0940c248d3b71ecf4a45100000000000003e800000000000000000000000000000000000000000000000000000000000000000000"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(transfer_1000_stx).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ).unwrap(); @@ -811,13 +815,14 @@ fn should_succeed_handling_malformed_and_valid_txs() { let header_hash = chain_tip.block.block_hash(); let consensus_hash = chain_tip.metadata.consensus_hash; let mut chainstate_copy = tenure.open_chainstate(); + let sortdb = tenure.open_fake_sortdb(); match round { 1 => { // On round 1, publish the KV contract let contract_sk = 
StacksPrivateKey::from_hex(SK_1).unwrap(); let publish_contract = make_contract_publish(&contract_sk, 0, 10, "store", STORE_CONTRACT); - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,publish_contract, + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,publish_contract, &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -827,7 +832,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Will not be mined // ./blockstack-cli contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "0000000001040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0101ef2b00e7e55ee5cb7684d5313c7c49680c97e60cb29f0166798e6ffabd984a030cf0a7b919bcf5fa052efd5d9efd96b927213cb3af1cfb8d9c5a0be0fccda64d030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -837,7 +842,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Will not be mined // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store set-value -e \"foo\" -e \"bar\" let set_foo_bar = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a010093f733efcebe2b239bb22e2e1ed25612547403af66b29282ed1f6fdfbbbf8f7f6ef107256d07947cbb72e165d723af99c447d6e25e7fbb6a92fd9a51c5ef7ee9030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265097365742d76616c7565000000020d00000003666f6f0d00000003626172"; - 
tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(set_foo_bar).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); @@ -846,7 +851,7 @@ fn should_succeed_handling_malformed_and_valid_txs() { // On round 4, publish a "get:foo" transaction // ./blockstack-cli --testnet contract-call 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 10 1 STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A store get-value -e \"foo\" let get_foo = "8080000000040021a3c334fc0ee50359353799e8b2605ac6be1fe40000000000000001000000000000000a0100b7ff8b6c20c427b4f4f09c1ad7e50027e2b076b2ddc0ab55e64ef5ea3771dd4763a79bc5a2b1a79b72ce03dd146ccf24b84942d675a815819a8b85aa8065dfaa030200000000021a21a3c334fc0ee50359353799e8b2605ac6be1fe40573746f7265096765742d76616c7565000000010d00000003666f6f"; - tenure.mem_pool.submit_raw(&mut chainstate_copy, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), + tenure.mem_pool.submit_raw(&mut chainstate_copy, &sortdb, &consensus_hash, &header_hash,hex_bytes(get_foo).unwrap().to_vec(), &ExecutionCost::max_value(), &StacksEpochId::Epoch20, ).unwrap(); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2360d7dec2..2be1d739da 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -57,7 +57,6 @@ use stacks::util::hash::{bytes_to_hex, hex_bytes, to_hex}; use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; use stacks::util_lib::boot::boot_code_id; -use stacks::vm::database::ClarityDeserializable; use stacks::vm::types::PrincipalData; use stacks::vm::types::QualifiedContractIdentifier; use stacks::vm::types::StandardPrincipalData; @@ -595,7 +594,7 @@ pub 
fn submit_tx(http_origin: &str, tx: &Vec) -> String { ); return res; } else { - eprintln!("{}", res.text().unwrap()); + eprintln!("Submit tx error: {}", res.text().unwrap()); panic!(""); } } @@ -1382,7 +1381,7 @@ fn liquid_ustx_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "execute" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); let liquid_ustx = parsed.expect_result_ok().expect_u128(); assert!(liquid_ustx > 0, "Should be more liquid ustx than 0"); tested = true; @@ -1813,6 +1812,8 @@ fn stx_delegate_btc_integration_test() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4662,7 +4663,7 @@ fn cost_voting_integration() { serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); } else if contract_call.function_name.as_str() == "propose-vote-confirm" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); assert_eq!(parsed.to_string(), "(ok u0)"); tested = true; } @@ -4708,7 +4709,7 @@ fn cost_voting_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "confirm-miners" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); assert_eq!(parsed.to_string(), "(err 13)"); tested = true; } @@ -4757,7 +4758,7 @@ fn cost_voting_integration() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "confirm-miners" { let raw_result = 
tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); assert_eq!(parsed.to_string(), "(ok true)"); tested = true; } @@ -5839,6 +5840,8 @@ fn pox_integration_test() { (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -6010,8 +6013,7 @@ fn pox_integration_test() { eprintln!("{}", contract_call.function_name.as_str()); if contract_call.function_name.as_str() == "stack-stx" { let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = - >::deserialize(&raw_result[2..]); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) @@ -7722,6 +7724,8 @@ fn atlas_stress_integration_test() { } eprintln!("attachment_indexes = {:?}", &attachment_indexes); + let max_request_time_ms = 100; + for (ibh, attachments) in attachment_indexes.iter() { let l = attachments.len(); for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) { @@ -7763,12 +7767,13 @@ fn atlas_stress_integration_test() { let total_time = ts_end.saturating_sub(ts_begin); eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); - // requests should take no more than 20ms + // requests should take no more than max_request_time_ms assert!( - total_time < attempts * 50, - "Atlas inventory request is too slow: {} >= {} * 50", + total_time < attempts * max_request_time_ms, + "Atlas inventory request is too slow: {} >= {} * {}", total_time, - attempts + attempts, + max_request_time_ms ); } @@ -7804,12 +7809,13 @@ fn atlas_stress_integration_test() { let total_time = ts_end.saturating_sub(ts_begin); 
eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); - // requests should take no more than 40ms + // requests should take no more than max_request_time_ms assert!( - total_time < attempts * 50, - "Atlas chunk request is too slow: {} >= {} * 50", + total_time < attempts * max_request_time_ms, + "Atlas chunk request is too slow: {} >= {} * {}", total_time, - attempts + attempts, + max_request_time_ms ); } } @@ -10472,6 +10478,8 @@ fn test_competing_miners_build_on_same_chain( (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone();