Skip to content

Commit

Permalink
feat: docker & k8s (#12)
Browse files Browse the repository at this point in the history
* feat: docker & k8s

Signed-off-by: Thomas Chataigner <[email protected]>

* feat: wip docker

* feat: wip docker flexible lc

* feat: simplify k8s conf

* feat: refactor aptos proof server to one bin

* feat: one server bin eth + k8s aptos

* feat: ethereum client configuration

* chore: lint

* ci: revise docker publish

* refactor: base review integrated

* chore: lint

* refactor: router for proof server + health check

* refactor: ethereum health check

* refactor: probes

* refactor: multiple routes eth proof server

* refactor: fix compilation

* refactor: accept octet stream

* refactor: change handling request proof server

* refactor: all routes working

* refactor: not using serde json

* refactor: only one request

* refactor: health does not count as increment

* fix: fix middleware

* refactor: working aptos proof_server

* chore: use let-else more effectively (#197)

* refactor: replicas

---------

Signed-off-by: Thomas Chataigner <[email protected]>
Co-authored-by: François Garillot <[email protected]>
  • Loading branch information
tchataigner and huitseeker authored Aug 30, 2024
1 parent b2ba63a commit da581a5
Show file tree
Hide file tree
Showing 45 changed files with 1,634 additions and 803 deletions.
82 changes: 82 additions & 0 deletions .github/workflows/docker-publish.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# Source: https://raw.githubusercontent.com/foundry-rs/foundry/master/.github/workflows/docker-publish.yml
name: docker

on:
workflow_dispatch:
inputs:
light-client:
description: 'aptos or ethereum'
type: choice
options:
- aptos
- ethereum

env:
REGISTRY: ghcr.io
jobs:
container:
runs-on: ubuntu-latest
# https://docs.github.com/en/actions/reference/authentication-in-a-workflow
permissions:
id-token: write
packages: write
contents: read
timeout-minutes: 120
steps:
- name: Checkout repository
id: checkout
uses: actions/checkout@v4

- name: Install Docker BuildX
uses: docker/setup-buildx-action@v2
id: buildx
with:
install: true

# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
# Ensure this doesn't trigger on PR's
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.REPO_TOKEN }}

# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@v4
with:
images: "argumentcomputer/${{ inputs.light-client }}"

# Creates an additional 'latest'
- name: Finalize Docker Metadata
id: docker_tagging
run: |
echo "Neither scheduled nor manual release from main branch. Just tagging as branch name"
echo "docker_tags=argumentcomputer/${{ inputs.light-client }}:${GITHUB_REF##*/}" >> $GITHUB_OUTPUT
# Log docker metadata to explicitly know what is being pushed
- name: Inspect Docker Metadata
run: |
echo "TAGS -> ${{ steps.docker_tagging.outputs.docker_tags }}"
echo "LABELS -> ${{ steps.meta.outputs.labels }}"
# Build and push Docker image
# https://github.com/docker/build-push-action
# https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md
- name: Build and push Docker image
uses: docker/build-push-action@v3
with:
context: .
file: ./docker/Dockerfile
push: true
tags: ${{ steps.docker_tagging.outputs.docker_tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: |
LIGHT_CLIENT=${{ inputs.light-client }}
10 changes: 10 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,16 @@
# Rust assets
**/target
Cargo.lock

# IDE config
.idea

# Secrets
*secret*
*.env
!.example.env

# Contract assets
ethereum/move/build/*
ethereum/move/gas-profiling/*

16 changes: 16 additions & 0 deletions aptos/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions aptos/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ aptos-temppath = { git = "https://github.com/aptos-labs/aptos-core/", tag = "apt
aptos-types = { git = "https://github.com/aptos-labs/aptos-core/", tag = "aptos-node-v1.14.0" }
aptos-vm = { git = "https://github.com/aptos-labs/aptos-core/", tag = "aptos-node-v1.14.0" }
aptos-vm-genesis = { git = "https://github.com/aptos-labs/aptos-core/", tag = "aptos-node-v1.14.0" }
axum = "0.7.5"
backoff = { version = "0.4.0", features = ["tokio"] }
# From https://github.com/aptos-labs/aptos-core/blob/aptos-node-v1.14.0/Cargo.toml#L485
bcs = { git = "https://github.com/aptos-labs/bcs.git", rev = "d31fab9d81748e2594be5cd5cdf845786a30562d" }
bls12_381 = { git = "https://github.com/argumentcomputer/bls12_381.git", branch = "zkvm" }
Expand Down
41 changes: 21 additions & 20 deletions aptos/README.md
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
## Aptos Light Client

This is a light client for the Aptos blockchain. It is written in Rust and lives in the workspace defined in this
directory.
In this README we will go over a few details that need to be known before hopping into development.
This is a light client for the Aptos blockchain. It is written in Rust and lives in the workspace defined in this directory. In this README we will go over a few details that need to be known before hopping into development.

For a more detailed overview of the Light Client and its components, and how to run and benchmark it, you can refer to
the
mdBook. To read it run:
For a more detailed overview of the Light Client and its components, and how to run and benchmark it, you can refer to the mdBook. To read it run:

```bash
cd docs && \
Expand All @@ -19,31 +15,36 @@ Then navigate to [`localhost:3000`](http://localhost:3000).

The workspace is divided into the following:

- `proof-server`: The server layer on top of the proving library. It exposes a REST API to generate proofs for the
light client.
- `light-client`: The main library that contains the light client implementation. It is in charge of producing proofs
regarding the consensus of the chain and inclusion of some account values in a Merkle Tree.
- `proof-server`: The server layer on top of the proving library. It exposes a REST API to generate proofs for the light client.
- `light-client`: The main library that contains the light client implementation. It is in charge of producing proofs regarding the consensus of the chain and inclusion of some account values in a Merkle Tree.
- `core`: The core library that contains the data structures and utilities used by the light client.
- `aptos-programs`: A library that exposes the Sphinx programs used to generate proofs for our light client.*
- `programs/*`: Actual implementations of the Sphinx programs.

## Development

When developing, you might have to update the programs' implementation. The
programs implementations are located in `./programs/*` and the compiled binaries
are located in `./aptos-programs/artifacts`. Currently, artifacts binaries are
generated in two ways:
When developing, you might have to update the programs' implementation. The programs' implementations are located in
`./programs/*` and the compiled binaries are located in
`./aptos-programs/artifacts`. Currently, artifacts binaries are generated in two ways:

- Automated: There is a build script located at `./aptos-programs/build.rs` that
will compile all the programs and place them in the `./aptos-programs/artifacts`
- Automated: There is a build script located at
`./aptos-programs/build.rs` that will compile all the programs and place them in the `./aptos-programs/artifacts`
folder. To enable this feature, it is needed to set the environment variable `LC_PROGRAM_AUTOBUILD=1`.
- Manual: You can also compile the programs manually using `make` by running the following
command in the `./aptos-programs` folder:
- Manual: You can also compile the programs manually using `make` by running the following command in the
`./aptos-programs` folder:
```shell
make
```

## Running the Project

To run all the Light Client components, you can either run them manually (refer to [the README in the `proof-server`
crate](./proof-server/README.md))
or leverage our docker files (see [the README in the `docker` folder](../docker/README.md)).

## Benchmarks

For more information about how to run the benchmarks, please refer to the dedicated section of the mdBook. Otherwise,
the READMEs can be found in the [`docs/src/benchmark`](./docs/src/benchmark/overview.md) folder.
For more information about how to run the benchmarks, please refer to the dedicated section of the mdBook. Otherwise, the READMEs can be found in the [`docs/src/benchmark`](./docs/src/benchmark/overview.md) folder.
4 changes: 2 additions & 2 deletions aptos/docs/src/run/setup_proof_server.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ Now that our deployment machine is properly configured, we can run the secondary
```bash
git clone [email protected]:argumentcomputer/zk-light-clients.git && \
cd zk-light-clients/aptos/proof-server && \
SHARD_BATCH_SIZE=0 RUSTFLAGS="-C target-cpu=native --cfg tokio_unstable -C opt-level=3" cargo run --release --bin server_secondary -- -a <NETWORK_ADDRESS>
SHARD_BATCH_SIZE=0 RUSTFLAGS="-C target-cpu=native --cfg tokio_unstable -C opt-level=3" cargo run --release --bin proof_server -- --mode "single" -a <NETWORK_ADDRESS>
```

## Deploy the primary server
Expand All @@ -48,5 +48,5 @@ Finally, once the primary server is configured in the same fashion, run it:
```bash
git clone [email protected]:argumentcomputer/zk-light-clients.git && \
cd zk-light-clients/aptos/proof-server && \
SHARD_BATCH_SIZE=0 RUSTFLAGS="-C target-cpu=native --cfg tokio_unstable -C opt-level=3" cargo run --release --bin server_primary -- -a <NETWORK_ADDESS> --snd-addr <SECONDARY_SERVER_ADDRESS>
SHARD_BATCH_SIZE=0 RUSTFLAGS="-C target-cpu=native --cfg tokio_unstable -C opt-level=3" cargo run --release --bin proof_server -- --mode "split" -a <NETWORK_ADDRESS> --snd-addr <SECONDARY_SERVER_ADDRESS>
```
10 changes: 4 additions & 6 deletions aptos/proof-server/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,8 @@ name = "client"
path = "src/bin/client.rs"

[[bin]]
name = "server_primary"
path = "src/bin/server_primary.rs"

[[bin]]
name = "server_secondary"
path = "src/bin/server_secondary.rs"
name = "proof_server"
path = "src/bin/proof_server.rs"

[dependencies]
# local
Expand All @@ -26,6 +22,8 @@ aptos-lc-core = { path = "../core" }

# workspace
anyhow = { workspace = true }
axum = { workspace = true }
backoff = { workspace = true, features = ["tokio"] }
bcs = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true }
Expand Down
78 changes: 51 additions & 27 deletions aptos/proof-server/benches/proof_server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@

use anyhow::anyhow;
use bcs::from_bytes;
use proof_server::error::ClientError;
use proof_server::types::aptos::{AccountInclusionProofResponse, EpochChangeProofResponse};
use proof_server::types::proof_server::{EpochChangeData, InclusionData, Request};
use proof_server::utils::{read_bytes, write_bytes};
use proof_server::types::proof_server::{EpochChangeData, InclusionData, ProvingMode, Request};
use serde::Serialize;
use sphinx_sdk::artifacts::try_install_plonk_bn254_artifacts;
use std::env;
Expand Down Expand Up @@ -337,9 +337,7 @@ async fn bench_proving_inclusion(final_snark: bool) -> Result<ProofData, anyhow:
// Connect to primary server
let primary_address =
env::var("PRIMARY_ADDR").map_err(|_| anyhow::anyhow!("PRIMARY_ADDR not set"))?;
let mut tcp_stream = TcpStream::connect(primary_address)
.await
.map_err(|e| anyhow!(e))?;
let client = reqwest::Client::new();

// Read the binary file
let mut file = File::open(ACCOUNT_INCLUSION_DATA_PATH).map_err(|e| anyhow!(e))?;
Expand All @@ -354,21 +352,35 @@ async fn bench_proving_inclusion(final_snark: bool) -> Result<ProofData, anyhow:
let inclusion_data: InclusionData = account_inclusion_proof_response.into();

// Send the InclusionData as a request payload to the primary server
let request_bytes = if final_snark {
bcs::to_bytes(&Request::SnarkProveInclusion(inclusion_data)).map_err(|e| anyhow!(e))?
let proving_type = if final_snark {
ProvingMode::SNARK
} else {
bcs::to_bytes(&Request::ProveInclusion(inclusion_data)).map_err(|e| anyhow!(e))?
ProvingMode::STARK
};

write_bytes(&mut tcp_stream, &request_bytes)
.await
.map_err(|e| anyhow!(e))?;
let request_bytes = bcs::to_bytes(&Request::ProveInclusion(Box::new((
proving_type,
inclusion_data,
))))
.map_err(|e| anyhow!(e))?;

// Start measuring proving time
let start = Instant::now();

// Measure the time taken to get a response and the size of the response payload
let response_bytes = read_bytes(&mut tcp_stream).await.map_err(|e| anyhow!(e))?;
let response = client
.post(format!("http://{primary_address}/inclusion/proof"))
.header("Accept", "application/octet-stream")
.body(request_bytes)
.send()
.await
.map_err(|err| ClientError::Request {
endpoint: primary_address,
source: err.into(),
})?;

let response_bytes = response
.bytes()
.await
.map_err(|err| ClientError::Internal { source: err.into() })?;

Ok(ProofData {
proving_time: start.elapsed().as_millis(),
Expand All @@ -380,9 +392,7 @@ async fn bench_proving_epoch_change(final_snark: bool) -> Result<ProofData, anyh
// Connect to primary server
let primary_address =
env::var("PRIMARY_ADDR").map_err(|_| anyhow::anyhow!("PRIMARY_ADDR not set"))?;
let mut tcp_stream = TcpStream::connect(primary_address)
.await
.map_err(|e| anyhow!(e))?;
let client = reqwest::Client::new();

// Read the binary file
let mut file = File::open(EPOCH_CHANGE_DATA_PATH).map_err(|e| anyhow!(e))?;
Expand All @@ -394,24 +404,38 @@ async fn bench_proving_epoch_change(final_snark: bool) -> Result<ProofData, anyh
from_bytes(&buffer).map_err(|e| anyhow!(e))?;

// Convert the EpochChangeProofResponse structure into an EpochChangeData structure
let inclusion_data: EpochChangeData = account_inclusion_proof_response.into();
let epoch_change_data: EpochChangeData = account_inclusion_proof_response.into();

// Send the InclusionData as a request payload to the primary server
let request_bytes = if final_snark {
bcs::to_bytes(&Request::SnarkProveEpochChange(inclusion_data)).map_err(|e| anyhow!(e))?
let proving_type = if final_snark {
ProvingMode::SNARK
} else {
bcs::to_bytes(&Request::ProveEpochChange(inclusion_data)).map_err(|e| anyhow!(e))?
ProvingMode::STARK
};

write_bytes(&mut tcp_stream, &request_bytes)
.await
.map_err(|e| anyhow!(e))?;
let request_bytes = bcs::to_bytes(&Request::ProveEpochChange(Box::new((
proving_type,
epoch_change_data,
))))
.map_err(|e| anyhow!(e))?;

// Start measuring proving time
let start = Instant::now();

// Measure the time taken to get a response and the size of the response payload
let response_bytes = read_bytes(&mut tcp_stream).await.map_err(|e| anyhow!(e))?;
let response = client
.post(format!("http://{primary_address}/epoch/proof"))
.header("Accept", "application/octet-stream")
.body(request_bytes)
.send()
.await
.map_err(|err| ClientError::Request {
endpoint: primary_address,
source: err.into(),
})?;

let response_bytes = response
.bytes()
.await
.map_err(|err| ClientError::Internal { source: err.into() })?;

Ok(ProofData {
proving_time: start.elapsed().as_millis(),
Expand Down
Loading

0 comments on commit da581a5

Please sign in to comment.