name: Rust

on:
  merge_group:
  push:
    branches: [main, dev]
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    branches: [main, dev]

env:
  CARGO_TERM_COLOR: always
  # Disable incremental compilation.
  #
  # Incremental compilation is useful as part of an edit-build-test-edit cycle,
  # as it lets the compiler avoid recompiling code that hasn't changed. However,
  # on CI, we're not making small edits; we're almost always building the entire
  # project from scratch. Thus, incremental compilation on CI actually
  # introduces *additional* overhead to support making future builds
  # faster...but no future builds will ever occur in any given CI environment.
  #
  # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
  # for details.
  CARGO_INCREMENTAL: 0
  # Allow more retries for network requests in cargo (downloading crates) and
  # rustup (installing toolchains). This should help to reduce flaky CI failures
  # from transient network timeouts or other issues.
  CARGO_NET_RETRY: 10
  RUSTUP_MAX_RETRIES: 10
  # Don't emit giant backtraces in the CI logs.
  RUST_BACKTRACE: short

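# Cancel superseded runs: `github.head_ref` is only set on pull_request events,
# so each PR gets its own group and a new push cancels the in-progress run.
# Pushes to main/dev fall back to the unique `run_id` and are never cancelled.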
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
      fail-fast: false
    env:
      RUSTFLAGS: -D warnings
    steps:
      - uses: actions/checkout@v4
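      # No explicit toolchain is requested here, so the action is assumed to
      # pick up the repository's `rust-toolchain` file.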
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      # make sure benches don't bit-rot
      - name: build benches
        # TODO: --all-features
        run: cargo build --benches --release
      - name: cargo test
        # TODO: --all-features
        run: |
          cargo nextest run --release
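      # nextest does not run doctests, so run those separately with the
      # built-in test runner.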
      - name: Doctests
        run: |
          cargo test --doc

  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
        with:
          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
      # See '.cargo/config' for list of enabled/disabled clippy lints
      - name: rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all --check
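      # `xclippy` is assumed to be a cargo alias defined in `.cargo/config`,
      # along the lines of:
      #   [alias]
      #   xclippy = "clippy --workspace --all-targets --"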
      - name: cargo clippy
        run: cargo xclippy -D warnings

  msrv:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install rustup
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Install cargo-msrv
        run: cargo install cargo-msrv
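      # `cargo msrv verify` checks that the crate builds with the MSRV declared
      # in the `rust-version` field of Cargo.toml.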
      - name: Check Rust MSRV
        run: cargo msrv verify

  # Runs the test suite on a self-hosted GPU machine with CUDA enabled
  test-cuda:
    name: Rust tests on CUDA
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: cuda
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
      - name: CUDA tests
        run: |
          cargo nextest run --release --no-default-features --features cuda,pasta,bls,arity2,arity4,arity8,arity11,arity16,arity24,arity36

  # Runs the test suite on a self-hosted GPU machine with CUDA and OpenCL
  # enabled (i.e. using the OpenCL backend on Nvidia GPUs)
  test-opencl:
    name: Rust tests on OpenCL
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: opencl
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
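      # `ocl-icd-opencl-dev` provides the OpenCL ICD loader (and pulls in the
      # OpenCL headers) needed to build and link against the OpenCL backend.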
      - name: Install GPU deps
        run: |
          apt-get update
          apt-get -y install ocl-icd-opencl-dev
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
      # Check that the OpenCL platforms and devices are visible
      - run: clinfo
      - name: OpenCL tests
        run: |
          cargo nextest run --release --all-features