test: lazy install cuda and amdgpu
avik-pal committed Jul 7, 2024
1 parent dc89db8 commit ee3338f
Showing 12 changed files with 57 additions and 43 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/CI.yml
@@ -54,6 +54,8 @@ jobs:
${{ runner.os }}-
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
LUXLIB_TEST_GROUP: ${{ matrix.test_group }}
- uses: julia-actions/julia-processcoverage@v1
with:
directories: src,ext
@@ -137,6 +139,8 @@ jobs:
- uses: julia-actions/julia-downgrade-compat@v1
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
LUXLIB_TEST_GROUP: ${{ matrix.test_group }}
- uses: julia-actions/julia-processcoverage@v1
with:
directories: src,ext
13 changes: 5 additions & 8 deletions Project.toml
@@ -38,26 +38,26 @@ LuxLibTrackercuDNNExt = ["CUDA", "Tracker", "cuDNN"]
LuxLibcuDNNExt = ["CUDA", "cuDNN"]

[compat]
AMDGPU = "0.8.4, 0.9"
AMDGPU = "0.9.6"
Aqua = "0.8.7"
ArrayInterface = "7.9"
CUDA = "5.3.2"
ChainRulesCore = "1.23"
ComponentArrays = "0.15.8"
DispatchDoctor = "0.4.7"
EnzymeCore = "0.7"
ExplicitImports = "1.4.1"
ExplicitImports = "1.9.0"
FastBroadcast = "0.2.8, 0.3"
FastClosures = "0.3.2"
ForwardDiff = "0.10.36"
GPUArraysCore = "0.1.6"
LinearAlgebra = "1.10"
LuxCUDA = "0.3.2"
LuxCore = "0.1.13"
LuxDeviceUtils = "0.1.23"
LuxTestUtils = "0.1.15"
Markdown = "1.10"
NNlib = "0.9.13"
Pkg = "1.10"
Random = "1.10"
ReTestItems = "1.23.1"
Reexport = "1"
@@ -71,22 +71,19 @@ cuDNN = "1.3"
julia = "1.10"

[extras]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
LuxDeviceUtils = "34f89e08-e1d5-43b4-8944-0b49ac560553"
LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

[targets]
test = ["AMDGPU", "Aqua", "CUDA", "ComponentArrays", "ExplicitImports", "ForwardDiff", "LuxCUDA", "LuxDeviceUtils", "LuxTestUtils", "ReTestItems", "ReverseDiff", "StableRNGs", "Test", "Tracker", "Zygote", "cuDNN"]
test = ["Aqua", "ComponentArrays", "ExplicitImports", "ForwardDiff", "LuxDeviceUtils", "LuxTestUtils", "Pkg", "ReTestItems", "ReverseDiff", "StableRNGs", "Test", "Tracker", "Zygote"]
2 changes: 1 addition & 1 deletion test/batchnorm_tests.jl
@@ -1,5 +1,5 @@
@testitem "Batch Normalization" tags=[:normalization] setup=[SharedTestSetup] timeout=3600 begin
rng = get_stable_rng(12345)
rng = StableRNG(12345)

function _setup_batchnorm(aType, T, sz; affine::Bool=true, track_stats::Bool)
x = __generate_fixed_array(T, sz) |> aType
6 changes: 3 additions & 3 deletions test/conv_tests.jl
@@ -1,5 +1,5 @@
@testitem "Fused Conv Bias Activation" tags=[:common_ops] setup=[SharedTestSetup] begin
rng = get_stable_rng(12345)
rng = StableRNG(12345)

_expand(N, i::Tuple) = i
_expand(N, i::Integer) = ntuple(_ -> i, N)
@@ -64,7 +64,7 @@
__f = (σ, w, x, b, cdims) -> sum(
abs2, fused_conv_bias_activation(σ, w, x, b, cdims))

if mode != "AMDGPU" && activation !== anonact
if mode != "amdgpu" && activation !== anonact
@inferred Zygote.gradient(__f, activation, weight, x, bias, cdims)
else
try
@@ -74,7 +74,7 @@
@test_broken false
end
end
if mode === "AMDGPU"
if mode === "amdgpu"
@eval @test_gradients $__f $activation $weight $x $bias $cdims gpu_testing=$on_gpu soft_fail=$fp16 atol=$atol rtol=$rtol skip_tracker=true skip_finite_differences=$(Tx !=
Tw)
else
2 changes: 1 addition & 1 deletion test/dense_tests.jl
@@ -1,5 +1,5 @@
@testitem "Fused Dense Bias Activation" tags=[:common_ops] setup=[SharedTestSetup] begin
rng = get_stable_rng(12345)
rng = StableRNG(12345)

@testset "$mode" for (mode, aType, on_gpu) in MODES
# These are not all possible combinations but rather a representative set to keep
14 changes: 7 additions & 7 deletions test/dropout_tests.jl
@@ -1,13 +1,13 @@
@testitem "Dropout" tags=[:common_ops] setup=[SharedTestSetup] begin
using Statistics

rng = get_stable_rng(12345)
rng = StableRNG(12345)

@testset "$mode" for (mode, aType, on_gpu) in MODES
for T in (Float16, Float32, Float64),
x_shape in ((2, 3), (2, 2, 3), (2, 2, 3, 1), (2, 2, 1, 3, 1))

T === Float16 && mode == "AMDGPU" && continue
T === Float16 && mode == "amdgpu" && continue

x = randn(rng, T, x_shape) |> aType

@@ -42,13 +42,13 @@ end
@testitem "Dropout with Preset Mask" tags=[:common_ops] setup=[SharedTestSetup] begin
using Statistics

rng = get_stable_rng(12345)
rng = StableRNG(12345)

@testset "$mode" for (mode, aType, on_gpu) in MODES
for T in (Float16, Float32, Float64),
x_shape in ((2, 3), (2, 2, 3), (2, 2, 3, 1), (2, 2, 1, 3, 1))

T === Float16 && mode == "AMDGPU" && continue
T === Float16 && mode == "amdgpu" && continue

x = randn(rng, T, x_shape) |> aType
mask = rand(T, x_shape) |> aType
@@ -132,13 +132,13 @@ end
@testitem "Alpha Dropout" tags=[:common_ops] setup=[SharedTestSetup] begin
using Statistics

rng = get_stable_rng(12345)
rng = StableRNG(12345)

@testset ExtendedTestSet "$mode" for (mode, aType, on_gpu) in MODES
@testset "$mode" for (mode, aType, on_gpu) in MODES
for T in (Float16, Float32, Float64),
x_shape in ((2, 3), (2, 2, 3), (2, 2, 3, 1), (2, 2, 1, 3, 1))

T === Float16 && mode == "AMDGPU" && continue
T === Float16 && mode == "amdgpu" && continue

x = randn(rng, T, x_shape) |> aType

2 changes: 1 addition & 1 deletion test/forwarddiff_tests.jl
@@ -94,7 +94,7 @@ end
@testitem "ForwardDiff dropout" tags=[:common_ops] setup=[SharedTestSetup] begin
using ForwardDiff

rng = get_stable_rng(12345)
rng = StableRNG(12345)

@testset "$mode: dropout" for (mode, aType, on_gpu) in MODES
x = randn(rng, Float32, 10, 2) |> aType
2 changes: 1 addition & 1 deletion test/groupnorm_tests.jl
@@ -1,5 +1,5 @@
@testitem "Group Normalization" tags=[:normalization] setup=[SharedTestSetup] timeout=3600 begin
rng = get_stable_rng(12345)
rng = StableRNG(12345)

function _setup_groupnorm(aType, T, sz, groups)
x = __generate_fixed_array(T, sz) |> aType
2 changes: 1 addition & 1 deletion test/instancenorm_tests.jl
@@ -1,7 +1,7 @@
@testitem "Instance Normalization" tags=[:normalization] setup=[SharedTestSetup] timeout=3600 begin
using Statistics

rng = get_stable_rng(12345)
rng = StableRNG(12345)

function _setup_instancenorm(aType, T, sz; affine::Bool=true)
x = __generate_fixed_array(T, sz) |> aType
3 changes: 1 addition & 2 deletions test/qa_tests.jl
@@ -5,8 +5,7 @@
end

@testitem "Explicit Imports" tags=[:others] begin
import cuDNN, CUDA, ForwardDiff, ReverseDiff, Tracker, AMDGPU, NNlib

import ForwardDiff, ReverseDiff, Tracker, NNlib
using ExplicitImports

@test check_no_implicit_imports(LuxLib) === nothing
16 changes: 15 additions & 1 deletion test/runtests.jl
@@ -1,4 +1,18 @@
using ReTestItems
using ReTestItems, Pkg

const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "all"))
const EXTRA_PKGS = String[]

(BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") && push!(EXTRA_PKGS, "LuxCUDA")
(BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") && push!(EXTRA_PKGS, "AMDGPU")

if !isempty(EXTRA_PKGS)
@info "Installing Extra Packages for testing" EXTRA_PKGS=EXTRA_PKGS
Pkg.add(EXTRA_PKGS)
Pkg.update()
Base.retry_load_extensions()
Pkg.instantiate()
end

const LUXLIB_TEST_GROUP = get(ENV, "LUXLIB_TEST_GROUP", "all")
@info "Running tests for group: $LUXLIB_TEST_GROUP"
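For reference, a minimal local-usage sketch of the lazy-install flow added in test/runtests.jl above. The environment variable names and their accepted values ("all", "cpu", "cuda", "amdgpu") come from the diff; the specific test group "common_ops" and the Pkg.test invocation are illustrative assumptions, not part of this commit.

    # Sketch only: run the test suite with just the CUDA backend selected,
    # letting test/runtests.jl add LuxCUDA on demand (per the diff above).
    using Pkg
    ENV["BACKEND_GROUP"] = "cuda"            # "all" (default), "cpu", "cuda", or "amdgpu"
    ENV["LUXLIB_TEST_GROUP"] = "common_ops"  # assumed test group, mirrors the CI matrix variable
    Pkg.test("LuxLib")                       # ENV propagates to the spawned test process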
34 changes: 17 additions & 17 deletions test/shared_testsetup.jl
@@ -1,38 +1,38 @@
@testsetup module SharedTestSetup
import Reexport: @reexport

using LuxLib, LuxCUDA, AMDGPU
using LuxDeviceUtils
using LuxLib, LuxDeviceUtils
@reexport using LuxTestUtils, StableRNGs, Test, Zygote
import LuxTestUtils: @jet, @test_gradients, check_approx

const BACKEND_GROUP = get(ENV, "BACKEND_GROUP", "All")
const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "All"))

cpu_testing() = BACKEND_GROUP == "All" || BACKEND_GROUP == "CPU"
if BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda"
using LuxCUDA
end

if BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu"
using AMDGPU
end

cpu_testing() = BACKEND_GROUP == "all" || BACKEND_GROUP == "cpu"
function cuda_testing()
return (BACKEND_GROUP == "All" || BACKEND_GROUP == "CUDA") &&
return (BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") &&
LuxDeviceUtils.functional(LuxCUDADevice)
end
function amdgpu_testing()
return (BACKEND_GROUP == "All" || BACKEND_GROUP == "AMDGPU") &&
return (BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") &&
LuxDeviceUtils.functional(LuxAMDGPUDevice)
end

const MODES = begin
# Mode, Array Type, GPU?
cpu_mode = ("CPU", Array, false)
cuda_mode = ("CUDA", CuArray, true)
amdgpu_mode = ("AMDGPU", ROCArray, true)

modes = []
cpu_testing() && push!(modes, cpu_mode)
cuda_testing() && push!(modes, cuda_mode)
amdgpu_testing() && push!(modes, amdgpu_mode)
cpu_testing() && push!(modes, ("cpu", Array, false))
cuda_testing() && push!(modes, ("cuda", CuArray, true))
amdgpu_testing() && push!(modes, ("amdgpu", ROCArray, true))
modes
end

get_stable_rng(seed=12345) = StableRNG(seed)

__istraining(::Val{training}) where {training} = training

@inline __generate_fixed_array(::Type{T}, sz...) where {T} = __generate_fixed_array(T, sz)
@@ -41,6 +41,6 @@ __istraining(::Val{training}) where {training} = training
end
@inline __generate_fixed_array(::Type{T}, sz::Int) where {T} = T.(collect(1:sz) ./ sz)

export cpu_testing, cuda_testing, amdgpu_testing, MODES, get_stable_rng, __istraining,
export cpu_testing, cuda_testing, amdgpu_testing, MODES, StableRNG, __istraining,
check_approx, @jet, @test_gradients, __generate_fixed_array
end
