update test for switching gpu backend
chengchingwen committed Apr 26, 2024
1 parent f5c1904 commit bc28f14
Showing 5 changed files with 36 additions and 37 deletions.
2 changes: 1 addition & 1 deletion test/collapseddims.jl
@@ -1,4 +1,4 @@
-if !USE_CUDA
+if !USE_GPU
@testset "CollapsedDim" begin
using NeuralAttentionlib.Matmul
using NeuralAttentionlib: collapseddims_fdim1, collapseddims_nonbatch, collapseddims_nonbatch_fdim1
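The same one-line rename recurs in every file below: the AD testsets call `test_rrule` from ChainRulesTestUtils, which compares rrules against finite differences on plain CPU `Array`s, so they are skipped whenever the suite runs on a GPU backend. A minimal sketch of the gating pattern (the `test_rrule` call is copied from the functional.jl hunk below; the import path for `dot_product_score` is an assumption):

```julia
using Test, ChainRulesTestUtils
using NeuralAttentionlib: dot_product_score  # assumed import path

# `USE_GPU` is the flag defined in test/runtests.jl (see the last hunk).
if !USE_GPU
    @testset "AD" begin
        # finite-difference rrule check; only meaningful on CPU arrays
        test_rrule(dot_product_score, randn(5, 3, 2), randn(5, 4, 2); check_inferred = false)
    end
end
```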
12 changes: 6 additions & 6 deletions test/functional.jl
@@ -13,7 +13,7 @@
layer_norm, rms_layer_norm, get_sincos_position_embeddings

@testset "score" begin
-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
test_rrule(dot_product_score, randn(5, 3, 2), randn(5, 4, 2); check_inferred = false)
test_rrule(dot_product_score, randn(5, 3, 2, 2), randn(5, 4, 2, 2))
@@ -84,7 +84,7 @@
end
end

-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
test_rrule(
scalar_relative_position_embedding, t5_bucketed_position_id(8, 20), randn(3, 8),
@@ -182,7 +182,7 @@
@test with_rotary_position_embedding(x) ≈ naive_rotary_pe(x)
@test with_rotary_position_embedding(256, x) ≈ naive_rotary_pe_w_dim(256, x)
@test with_rotary_position_embedding(256)(x) ≈ naive_rotary_pe_w_dim(256, x)
-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
x = randn(512, 5, 3, 2)
@test Zygote.gradient(x->sum(sin.(with_rotary_position_embedding(x))), x)[1]
@@ -226,7 +226,7 @@
atol = 5e-1
)

-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
g = randn(20)
b = randn(20)
@@ -258,7 +258,7 @@

@testset "attention" begin
@testset "multihead_qkv_attention" begin
-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
for i = 1:3
a = randn(20, 3, 2)
@@ -296,7 +296,7 @@
@test grad[2] ≈ ngrad[2]
@test grad[3] ≈ ngrad[3]

-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
for i = 1:3
a = randn(30, 3, 2)
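For reference, the CPU-only gradient check in the rotary-embedding hunk above can be reproduced standalone. A hedged sketch (the gradient expression is taken verbatim from the hunk; the import path is an assumption):

```julia
using Zygote
using NeuralAttentionlib: with_rotary_position_embedding  # assumed import path

x = randn(512, 5, 3, 2)
# Gradient of a scalar loss through the rotary position embedding,
# as exercised by the AD testset above.
g = Zygote.gradient(x -> sum(sin.(with_rotary_position_embedding(x))), x)[1]
size(g) == size(x)  # the pullback preserves the input shape
```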
2 changes: 1 addition & 1 deletion test/mask.jl
@@ -285,7 +285,7 @@
@test_throws DimensionMismatch drandn(5, 4) .* (GenericAttenMask(drand(Bool, 3, 4)) | SymLengthMask([2]))
end

-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
m = (LocalMask(1) | CausalMask() & !(BandPartMask(5,5)) | BiLengthMask([2,3], [3, 7]))

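The composite in the AD testset shows the mask algebra this file exercises: `|` is union, `&` is intersection, and `!` complements a mask, and (per the `@test_throws DimensionMismatch` line above) masks broadcast-multiply against score arrays with shape checking. A sketch of just the composition, verbatim from the hunk (the import path is an assumption):

```julia
using NeuralAttentionlib: LocalMask, CausalMask, BandPartMask, BiLengthMask  # assumed import path

# Union of: a width-1 local band, the causal region minus a 5/5 band part,
# and a per-batch bidirectional length mask.
m = LocalMask(1) | CausalMask() & !(BandPartMask(5, 5)) | BiLengthMask([2, 3], [3, 7])
```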
4 changes: 2 additions & 2 deletions test/matmul.jl
@@ -18,7 +18,7 @@
end
uwcs(x) = size(unwrap_collapse(x))

-if USE_CUDA
+if USE_GPU
eltype_list = (Float64, Float32, Float16, ComplexF64, ComplexF32)
else
eltype_list = (Float64, Float32, ComplexF64, ComplexF32)
@@ -178,7 +178,7 @@
end
end

-if !USE_CUDA
+if !USE_GPU
@testset "AD" begin
test_rrule(matmul, randn(7,6,5), randn(6, 2), randn())
test_rrule(matmul, randn(7,6,5,4), randn(6), randn())
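Two things are visible in this hunk: `Float16` only enters `eltype_list` for GPU runs (CPU BLAS generally lacks a Float16 gemm), and the rrule tests pin down a 3-argument `matmul(A, B, s)` form. A hedged sketch of that signature, inferred from the `test_rrule` calls above (the batching and scaling semantics are assumptions):

```julia
using NeuralAttentionlib.Matmul  # same import as at the top of this test file

A = randn(7, 6, 5)      # a batch of five 7×6 matrices
B = randn(6, 2)         # a shared right operand
s = 1 / sqrt(6)         # illustrative scale factor
C = matmul(A, B, s)     # assumed: batched A*B scaled by s, giving size (7, 2, 5)
```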
53 changes: 26 additions & 27 deletions test/runtests.jl
@@ -24,40 +24,39 @@ include("old_impl/old_impl.jl")
using .Old_Impl
using .Old_Impl: batched_triu!, batched_tril!

-function should_test_cuda()
-    e = get(ENV, "JL_PKG_TEST_CUDA", false)
-    e isa Bool && return e
-    if e isa String
-        x = tryparse(Bool, e)
-        return isnothing(x) ? false : x
-    else
-        return false
-    end
+function testing_gpu()
+    e = get(ENV, "JL_PKG_TEST_GPU", nothing)
+    isnothing(e) && return nothing
+    x = lowercase(e)
+    if isempty(x)
+        return nothing
+    elseif x == "cuda"
+        return :cuda
+    elseif x == "amdgpu"
+        return :amdgpu
+    end
+    error("Unknown value for `JL_PKG_TEST_GPU`: $x")
 end

-function should_test_amdgpu()
-    e = get(ENV, "JL_PKG_TEST_AMDGPU", false)
-    e isa Bool && return e
-    if e isa String
-        x = tryparse(Bool, e)
-        return isnothing(x) ? false : x
-    else
-        return false
-    end
-end
+const GPUBACKEND = testing_gpu()
+if isnothing(GPUBACKEND)
+    const USE_GPU = false
+else
+    const USE_GPU = true
+    if GPUBACKEND == :cuda
+        using CUDA
+        CUDA.allowscalar(false)
+    elseif GPUBACKEND == :amdgpu
+        using AMDGPU
+        AMDGPU.allowscalar(false)
+    end
+end
+@show GPUBACKEND
+@show USE_GPU

-const USE_CUDA = @show should_test_cuda()
-const USE_AMDGPU = @show should_test_amdgpu()
-
-if USE_CUDA
-    CUDA.allowscalar(false)
-end
-
-if USE_AMDGPU
-    AMDGPU.allowscalar(false)
-end
-
-device(x) = USE_CUDA || USE_AMDGPU ? gpu(x) : x
+device(x) = USE_GPU ? gpu(x) : x

drandn(arg...) = randn(arg...) |> device
drand(arg...) = rand(arg...) |> device
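With the two per-backend booleans collapsed into one `USE_GPU` flag plus a `GPUBACKEND` symbol, the impossible "both backends at once" state disappears and only the selected GPU package is loaded. How the switch might be driven locally, hedged: the env-var name and the accepted values "cuda"/"amdgpu" come from `testing_gpu` above, while the `Pkg.test` call and package name are ordinary-Julia assumptions:

```julia
ENV["JL_PKG_TEST_GPU"] = "cuda"  # or "amdgpu"; leave unset for a CPU-only run
using Pkg
Pkg.test("NeuralAttentionlib")
```

Any other non-empty value, including the "true"/"false" strings the old boolean parsing accepted, now fails fast at the `error(...)` call at the end of `testing_gpu`.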
