diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 141b18292..7e3011fa1 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -48,11 +48,13 @@
       matrix:
         setup:
           version:
-            # - "1.6"
-            # - "1.7"
-            # - "1.8"
-            # - "1.9"
+            - "1.6"
+            - "1.7"
+            - "1.8"
+            - "1.9"
             - "1.10"
+      concurrency: 1
+      concurrency_group: mpi_cuda
       plugins:
         - JuliaCI/julia#v1:
             version: "{{matrix.version}}"
@@ -104,7 +106,7 @@
     key: "rocm-build-openmpi"
     agents:
       queue: "juliagpu"
-      rocm: "*" # todo fix ROCM version
+      rocm: "*"
     env:
       OPENMPI_VER: "5.0"
       OPENMPI_VER_FULL: "5.0.3"
diff --git a/src/environment.jl b/src/environment.jl
index b13e673a7..52597c430 100644
--- a/src/environment.jl
+++ b/src/environment.jl
@@ -324,6 +324,9 @@
 provides a mechanism to check, so it will return `false` with other implementations.
 This can be overriden by setting the `JULIA_MPI_HAS_CUDA` environment variable to
 `true` or `false`.
+
+!!! note
+    For OpenMPI or OpenMPI-based implementations you first need to call [Init()](@ref).
 """
 function has_cuda()
     flag = get(ENV, "JULIA_MPI_HAS_CUDA", nothing)
diff --git a/test/runtests.jl b/test/runtests.jl
index 99b7d186c..2a2fdd7db 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -38,6 +38,11 @@
     end
     CUDA.precompile_runtime()
     ArrayType = CUDA.CuArray
+
+    @info """
+    Running CUDA tests. Ensure that your MPI implementation is
+    CUDA-aware using `MPI.has_cuda` before reporting issues.
+    """
 elseif backend_name == "AMDGPU"
     Pkg.add("AMDGPU")
     ENV["JULIA_MPI_TEST_ARRAYTYPE"] = "ROCArray"
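
The docstring note added in `src/environment.jl` implies a particular call order: for Open MPI and Open MPI-based implementations, `MPI.has_cuda` only reports reliably after the library has been initialized. A minimal sketch of that usage, assuming MPI.jl and a CUDA-aware Open MPI build (the script name and messages are illustrative, not part of this diff):

```julia
# check_cuda_aware.jl -- hypothetical standalone check, not included in this change
using MPI

MPI.Init()  # for Open MPI-based implementations, Init() must precede has_cuda()

if MPI.has_cuda()
    @info "MPI implementation reports CUDA support"
else
    @warn "MPI implementation is not CUDA-aware, or support could not be detected"
end

MPI.Finalize()
```

Running a check like this (e.g. under `mpiexecjl` or your system `mpiexec`) before enabling the CUDA test matrix mirrors the advice in the new `@info` message in `test/runtests.jl`.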