This repository has been archived by the owner on Nov 4, 2024. It is now read-only.

test: remove deprecated API
avik-pal committed Jul 5, 2024
1 parent aa82fa5 commit b8348d3
Showing 6 changed files with 40 additions and 41 deletions.
14 changes: 6 additions & 8 deletions test/batchnorm_tests.jl
@@ -23,20 +23,18 @@
 track_stats in (true, false),
 act in (identity, relu, tanh_fast, sigmoid_fast, x -> x^3)

-_f = (args...) -> batchnorm(args..., act; epsilon, training, momentum=T(0.9))
+_f = (args...) -> batchnorm(args..., training, act, T(0.9), epsilon)

 epsilon = T(1e-5)
 x, scale, bias, rm, rv = _setup_batchnorm(aType, T, sz; track_stats, affine)

-y, nt = batchnorm(
-    x, scale, bias, rm, rv, act; epsilon, training, momentum=T(0.9))
+y, nt = batchnorm(x, scale, bias, rm, rv, training, act, T(0.9), epsilon)

-@inferred batchnorm(
-    x, scale, bias, rm, rv, act; epsilon, training, momentum=T(0.9))
+@inferred batchnorm(x, scale, bias, rm, rv, training, act, T(0.9), epsilon)

 # Stresses CI too much
-T !== Float16 && @jet batchnorm(
-    x, scale, bias, rm, rv; act, epsilon, training, momentum=T(0.9))
+T !== Float16 &&
+    @jet batchnorm(x, scale, bias, rm, rv, training, act, T(0.9), epsilon)

 @test y isa aType{T, length(sz)}
 @test size(y) == sz
@@ -49,7 +47,7 @@
 if __istraining(training) && affine
     fp16 = T == Float16
     __f = (args...) -> sum(first(batchnorm(
-        x, args..., rm, rv, act; epsilon, training, momentum=T(0.9))))
+        x, args..., rm, rv, training, act, T(0.9), epsilon)))
     skip_fd = act === relu
     @eval @test_gradients $__f $scale $bias gpu_testing=$on_gpu soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 skip_finite_differences=$(skip_fd)
 end
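
Note: the sketch below is illustrative and not part of the commit. It restates the batchnorm call-site change in plain Julia, assuming the positional order shown above (running stats, then the training Val, activation, momentum, epsilon) and channel-length parameter vectors; the shapes and Float32 literals are invented for the example.

    using LuxLib, Random

    x = randn(Float32, 4, 4, 3, 2)                     # 4D input, 3 channels (dim 3)
    scale, bias = ones(Float32, 3), zeros(Float32, 3)  # per-channel affine parameters
    rm, rv = zeros(Float32, 3), ones(Float32, 3)       # running mean / variance

    # Old (deprecated keyword form removed by this commit):
    #   batchnorm(x, scale, bias, rm, rv, relu; epsilon=1.0f-5, training=Val(true), momentum=0.9f0)
    # New positional form used by the updated tests:
    y, nt = batchnorm(x, scale, bias, rm, rv, Val(true), relu, 0.9f0, 1.0f-5)
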
40 changes: 20 additions & 20 deletions test/dropout_tests.jl
@@ -11,25 +11,25 @@
 x = randn(rng, T, x_shape) |> aType

-@inferred dropout(rng, x, T(0.5), Val(true); dims=Colon())
+@inferred dropout(rng, x, T(0.5), Val(true), T(2), Colon())

-y, mask_, rng_ = dropout(rng, x, T(0.5), Val(true); dims=Colon())
+y, mask_, rng_ = dropout(rng, x, T(0.5), Val(true), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
 @test mask_ isa aType{T, length(x_shape)}
 @test size(mask_) == x_shape
 @test rng != rng_

-__f = x -> sum(first(dropout(rng, x, T(0.5), Val(true); dims=Colon())))
+__f = x -> sum(first(dropout(rng, x, T(0.5), Val(true), T(2), Colon())))

 fp16 = T == Float16
 @eval @test_gradients $__f $x atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16 gpu_testing=$on_gpu
-@jet sum(first(dropout(rng, x, T(0.5), Val(true); dims=Colon())))
-
-@inferred dropout(rng, x, T(0.5), Val(true); dims=Colon())
+@jet sum(first(dropout(rng, x, T(0.5), Val(true), T(2), Colon())))
+@inferred dropout(rng, x, T(0.5), Val(true), T(2), Colon())

-y, mask_, rng_ = dropout(rng, x, T(0.5), Val(false); dims=Colon())
+y, mask_, rng_ = dropout(rng, x, T(0.5), Val(false), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
@@ -54,10 +54,10 @@ end
 mask = rand(T, x_shape) |> aType

 # Update mask
-@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(true); dims=Colon())
+@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(true), T(2), Colon())

 y, mask_, rng_ = dropout(
-    rng, x, mask, T(0.5), Val(true), Val(true); dims=Colon())
+    rng, x, mask, T(0.5), Val(true), Val(true), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
@@ -67,18 +67,18 @@
 @test mask != mask_

 __f = x -> sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(true); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(true), T(2), Colon())))

 fp16 = T == Float16
 @eval @test_gradients $__f $x atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16 gpu_testing=$on_gpu
 @jet sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(true); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(true), T(2), Colon())))

 # Try using mask if possible (possible!!)
-@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())
+@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())

 y, mask_, rng_ = dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
@@ -88,18 +88,18 @@
 @test mask == mask_

 __f = x -> sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())))
 fp16 = T == Float16
 @eval @test_gradients $__f $x atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16 gpu_testing=$on_gpu
 @jet sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())))
 mask = rand(T, (x_shape[1:(end - 1)]..., 13)) |> aType

 # Try using mask if possible (not possible!!)
-@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())
+@inferred dropout(rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())

 y, mask_, rng_ = dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
@@ -109,16 +109,16 @@
 @test mask != mask_

 __f = x -> sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())))
 fp16 = T == Float16
 @eval @test_gradients $__f $x atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16 gpu_testing=$on_gpu
 @jet sum(first(dropout(
-    rng, x, mask, T(0.5), Val(true), Val(false); dims=Colon())))
+    rng, x, mask, T(0.5), Val(true), Val(false), T(2), Colon())))
 # Testing Mode
-@inferred dropout(rng, x, mask, T(0.5), Val(false), Val(false); dims=Colon())
+@inferred dropout(rng, x, mask, T(0.5), Val(false), Val(false), T(2), Colon())

 y, mask_, rng_ = dropout(
-    rng, x, mask, T(0.5), Val(false), Val(false); dims=Colon())
+    rng, x, mask, T(0.5), Val(false), Val(false), T(2), Colon())

 @test y isa aType{T, length(x_shape)}
 @test size(y) == x_shape
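
Note: an illustrative sketch (not from the commit) of both dropout call sites after the migration. It assumes the T(2) in the tests is the explicit inverse keep-probability inv(p) and that Colon() replaces the old dims keyword; the rng, shapes, and literals are invented for the example.

    using LuxLib, Random

    rng = Xoshiro(0)
    x = randn(Float32, 8, 4)
    p = 0.5f0

    # Old (deprecated):  dropout(rng, x, p, Val(true); dims=Colon())
    # New (positional):  the scaling factor and dims are explicit arguments
    y, mask, rng_new = dropout(rng, x, p, Val(true), inv(p), Colon())

    # Mask-reusing variant: the second Val controls whether the mask is regenerated
    y2, mask2, _ = dropout(rng, x, mask, p, Val(true), Val(false), inv(p), Colon())
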
8 changes: 4 additions & 4 deletions test/groupnorm_tests.jl
@@ -16,22 +16,22 @@
 groups in (2, 3),
 act in (identity, relu, tanh_fast, sigmoid_fast, x -> x^3)

-_f = (args...) -> groupnorm(args..., act; groups, epsilon)
+_f = (args...) -> groupnorm(args..., groups, act, epsilon)

 epsilon = T(1e-5)
 x, scale, bias = _setup_groupnorm(aType, T, sz, groups)
 y = _f(x, scale, bias)

-@inferred groupnorm(x, scale, bias, act; groups, epsilon)
+@inferred groupnorm(x, scale, bias, groups, act, epsilon)

 # Stresses CI too much
-T !== Float16 && @jet groupnorm(x, scale, bias, act; groups, epsilon)
+T !== Float16 && @jet groupnorm(x, scale, bias, groups, act, epsilon)

 @test y isa aType{T, length(sz)}
 @test size(y) == sz

 fp16 = T == Float16
-__f = (args...) -> sum(groupnorm(x, args..., act; groups, epsilon))
+__f = (args...) -> sum(groupnorm(x, args..., groups, act, epsilon))
 skip_fd = act === relu
 @eval @test_gradients $__f $scale $bias gpu_testing=$on_gpu atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16 skip_finite_differences=$(skip_fd)
 end
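
Note: illustrative only, not from the commit. The sketch assumes per-channel scale/bias vectors and a channel count divisible by the positional groups argument; the shapes are invented.

    using LuxLib

    x = randn(Float32, 4, 4, 6, 2)                     # 6 channels, divisible by groups = 3
    scale, bias = ones(Float32, 6), zeros(Float32, 6)

    # Old (deprecated):  groupnorm(x, scale, bias, relu; groups=3, epsilon=1.0f-5)
    # New (positional):  groups, activation, epsilon
    y = groupnorm(x, scale, bias, 3, relu, 1.0f-5)
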
11 changes: 6 additions & 5 deletions test/instancenorm_tests.jl
@@ -17,15 +17,16 @@
 affine in (true, false),
 act in (identity, relu, tanh_fast, sigmoid_fast, x -> x^3)

-_f = (args...) -> instancenorm(args..., act; epsilon, training)
+_f = (args...) -> instancenorm(args..., training, act, epsilon)

 epsilon = T(1e-5)
 x, scale, bias = _setup_instancenorm(aType, T, sz; affine)

-y, nt = instancenorm(x, scale, bias, act; epsilon, training)
+y, nt = instancenorm(x, scale, bias, training, act, epsilon)

-@inferred instancenorm(x, scale, bias, act; epsilon, training)
-@jet instancenorm(x, scale, bias, act; epsilon, training)
+@inferred instancenorm(x, scale, bias, training, act, epsilon)
+@jet instancenorm(x, scale, bias, training, act, epsilon)

 @test y isa aType{T, length(sz)}
 @test size(y) == sz

@@ -40,7 +41,7 @@
 if __istraining(training) && affine
     fp16 = T == Float16
     __f = (args...) -> sum(first(instancenorm(
-        x, args..., act; epsilon, training)))
+        x, args..., training, act, epsilon)))
     skip_fd = act === relu
     @eval @test_gradients $__f $scale $bias soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu skip_finite_differences=$(skip_fd)
 end
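
Note: an illustrative sketch, assuming the positional order used above (training Val, activation, epsilon) and that instancenorm returns the output together with a NamedTuple, as the tests' `y, nt = ...` destructuring suggests; shapes are invented.

    using LuxLib

    x = randn(Float32, 4, 4, 3, 2)
    scale, bias = ones(Float32, 3), zeros(Float32, 3)

    # Old (deprecated):  instancenorm(x, scale, bias, relu; epsilon=1.0f-5, training=Val(true))
    # New (positional):  training flag, activation, epsilon
    y, nt = instancenorm(x, scale, bias, Val(true), relu, 1.0f-5)
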
6 changes: 3 additions & 3 deletions test/layernorm_tests.jl
@@ -20,12 +20,12 @@
 dims = Colon()
 epsilon = T(1e-5)
-_f = (args...) -> layernorm(args..., act; dims, epsilon)
+_f = (args...) -> layernorm(args..., act, dims, epsilon)

 x, scale, bias = _setup_layernorm(aType, T, x_shape, affine_shape)

-@inferred layernorm(x, scale, bias, act; dims, epsilon)
-@jet layernorm(x, scale, bias, act; dims, epsilon)
+@inferred layernorm(x, scale, bias, act, dims, epsilon)
+@jet layernorm(x, scale, bias, act, dims, epsilon)

 y = _f(x, scale, bias)

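Note: illustrative only. The broadcastable scale/bias shape used here (a trailing singleton dimension) is an assumption about the `_setup_layernorm` helper, which is not shown in this diff; only the positional order (activation, dims, epsilon) comes from the change above.

    using LuxLib

    x = randn(Float32, 4, 3, 2)
    scale = ones(Float32, 4, 3, 1)
    bias = zeros(Float32, 4, 3, 1)

    # Old (deprecated):  layernorm(x, scale, bias, identity; dims=Colon(), epsilon=1.0f-5)
    # New (positional):  activation, dims, epsilon
    y = layernorm(x, scale, bias, identity, Colon(), 1.0f-5)
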
2 changes: 1 addition & 1 deletion test/qa_tests.jl
@@ -10,7 +10,7 @@ end
 using ExplicitImports

 @test check_no_implicit_imports(LuxLib) === nothing
-@test check_no_stale_explicit_imports(LuxLib, ignore=(:TrackedVector,)) === nothing
+@test check_no_stale_explicit_imports(LuxLib; ignore=(:TrackedVector,)) === nothing
 @test check_no_self_qualified_accesses(LuxLib) === nothing
 @test check_all_explicit_imports_via_owners(LuxLib) === nothing
 @test check_all_qualified_accesses_via_owners(LuxLib) === nothing
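
Note: the qa_tests change is stylistic rather than an API change. In Julia, call-site keyword arguments may be written either after the positional arguments or after an explicit semicolon; both lines below make the same call, and the commit standardizes on the semicolon form. The snippet assumes ExplicitImports and LuxLib are available.

    using ExplicitImports, LuxLib

    check_no_stale_explicit_imports(LuxLib, ignore=(:TrackedVector,))  # old style
    check_no_stale_explicit_imports(LuxLib; ignore=(:TrackedVector,))  # new style, same call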
