diff --git a/Project.toml b/Project.toml
index bf474dfe..0c081ab4 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "LuxLib"
 uuid = "82251201-b29d-42c6-8e01-566dec8acb11"
 authors = ["Avik Pal and contributors"]
-version = "0.3.39"
+version = "1.0.0"
 
 [deps]
 ArrayInterface = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
diff --git a/src/LuxLib.jl b/src/LuxLib.jl
index 67796493..766f7642 100644
--- a/src/LuxLib.jl
+++ b/src/LuxLib.jl
@@ -54,8 +54,6 @@ include("impl/forward_diff.jl")
 include("impl/matmul.jl")
 include("impl/normalization.jl")
 
-include("deprecations.jl")
-
 export batchnorm, groupnorm, instancenorm, layernorm, alpha_dropout, dropout
 export fused_dense_bias_activation, fused_conv_bias_activation
 export fast_activation, fast_activation!!
diff --git a/src/deprecations.jl b/src/deprecations.jl
deleted file mode 100644
index 3b002bf4..00000000
--- a/src/deprecations.jl
+++ /dev/null
@@ -1,39 +0,0 @@
-# Deprecations for version 0.4
-## normalization
-@deprecate batchnorm(x, scale, bias, running_mean, running_var, σ::F=identity;
-    momentum::Real, training::Val, epsilon::Real) where {F} batchnorm(
-    x, scale, bias, running_mean, running_var, training, σ, momentum, epsilon)
-
-@deprecate groupnorm(x, scale, bias, σ::F=identity; groups::Int, epsilon::Real) where {F} groupnorm(
-    x, scale, bias, groups, σ, epsilon)
-
-@deprecate instancenorm(x, scale, bias, σ::F=identity; epsilon, training) where {F} instancenorm(
-    x, scale, bias, training, σ, epsilon)
-
-@deprecate layernorm(x, scale, bias, σ::F=identity; dims, epsilon) where {F} layernorm(
-    x, scale, bias, σ, dims, epsilon)
-
-## dropout
-@deprecate dropout(
-    rng::AbstractRNG, x::AbstractArray, p::T, training::Val, invp::T; dims) where {T} dropout(
-    rng, x, p, training, invp, dims)
-
-@deprecate dropout(
-    rng::AbstractRNG, x::AbstractArray, p::T, training::Val; dims, invp::T=inv(p)) where {T} dropout(
-    rng, x, p, training, invp, dims)
-
-@deprecate dropout(rng::AbstractRNG, x::AbstractArray{T1, N}, mask::AbstractArray{T2, N},
-    p::T, training::Val, um::Val, invp::T; dims) where {T, T1, T2, N} dropout(
-    rng, x, mask, p, training, um, invp, dims)
-
-@deprecate dropout(rng::AbstractRNG, x::AbstractArray{T1, N}, mask::AbstractArray{T2, N},
-    p::T, training::Val, um::Val; dims, invp::T=inv(p)) where {T, T1, T2, N} dropout(
-    rng, x, mask, p, training, um, invp, dims)
-
-# bias activation. While this is not public, we used it in Lux
-function __apply_bias_activation(σ::F, x, bias::AbstractArray) where {F}
-    __depwarn("`__apply_bias_activation` is deprecated and will be removed in the next \
-        release. Use `bias_activation` instead.",
-        :__apply_bias_activation)
-    return __bias_activation_impl(σ, x, _vec(bias))
-end
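
Migration note (editorial, not part of the patch): the `@deprecate` mappings deleted above give the v1.0.0 replacements directly; the old keyword arguments (`momentum`, `training`, `epsilon`, `dims`, `invp`) become positional arguments. A minimal sketch of the before/after calls, assuming a 4-channel WHCN input, `Val(true)` as the training flag, and placeholder values throughout:

```julia
using LuxLib, Random

rng = Random.default_rng()
x = randn(rng, Float32, 3, 3, 4, 2)          # WHCN layout, 4 channels
scale, bias = ones(Float32, 4), zeros(Float32, 4)
rmean, rvar = zeros(Float32, 4), ones(Float32, 4)

# Old (removed):
#   batchnorm(x, scale, bias, rmean, rvar; momentum=0.1f0, training=Val(true), epsilon=1f-5)
# New positional form, per the deleted @deprecate mapping:
y, stats = batchnorm(x, scale, bias, rmean, rvar, Val(true), identity, 0.1f0, 1f-5)

# Old (removed):
#   dropout(rng, x, 0.5f0, Val(true); dims=:, invp=2.0f0)
# New positional form; invp = inv(p) is now an explicit argument:
y2, mask, rng2 = dropout(rng, x, 0.5f0, Val(true), 2.0f0, :)
```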