Changed the AutoEncoder API for outdims and innerdims
sylvaticus committed Dec 29, 2023
1 parent 480566c commit f2b4e62
Showing 2 changed files with 31 additions and 16 deletions.
40 changes: 25 additions & 15 deletions src/Utils/Utils_extra.jl
@@ -20,12 +20,14 @@ $(FIELDS)
"""
Base.@kwdef mutable struct AutoEncoderHyperParametersSet <: BetaMLHyperParametersSet
"The layers (vector of `AbstractLayer`s) responsable of the encoding of the data [def: `nothing`, i.e. one single layer]"
"The layers (vector of `AbstractLayer`s) responsable of the encoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `innerdims`]"
e_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"The layers (vector of `AbstractLayer`s) responsable of the decoding of the data [def: `nothing`, i.e. one single layer]"
"The layers (vector of `AbstractLayer`s) responsable of the decoding of the data [def: `nothing`, i.e. two dense layers with the inner one of `innerdims`]"
d_layers::Union{Nothing,Vector{AbstractLayer}} = nothing
"The number of neurons (i.e. dimensions) of the encoded data [def: `nothing`, i.e. `max(1,round(x_ndims/3))`]"
outdims::Union{Nothing,Int64} = nothing
"The number of neurons (i.e. dimensions) of the encoded data. If the value is a float it is consiered a percentual (to be rounded) of the dimensionality of the data [def: `0.33`]"
outdims::Union{Float64,Int64} = 0.333
"Inner layer dimension (i.e. number of neurons). If the value is a float it is consiered a percentual (to be rounded) of the dimensionality of the data [def: `nothing` that applies a specific heuristic]. If `e_layers` or `d_layers` are specified, this parameter is ignored for the respective part."
innerdims::Union{Int64,Float64,Nothing} = nothing
"""Loss (cost) function [def: `squared_cost`]
It must always treat y and ŷ as (n x d) matrices, using `dropdims` inside if needed.
"""
@@ -45,7 +47,7 @@ Base.@kwdef mutable struct AutoEncoderHyperParametersSet <: BetaMLHyperParametersSet
See [`SuccessiveHalvingSearch`](@ref) for the default method.
To enable automatic hyperparameter tuning during the (first) `fit!` call, simply set `autotune=true` and optionally change the default `tunemethod` options (including the parameter ranges, the resources to employ and the loss function to adopt).
"""
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges = Dict("epochs"=>[50,100,150],"batch_size"=>[2,4,8,16,32],"opt_alg"=>[SGD(λ=2),SGD(λ=1),SGD(λ=3),ADAM(λ=0.5),ADAM(λ=1),ADAM(λ=0.25)],"shuffle"=>[false,true]),multithreads=false)
tunemethod::AutoTuneMethod = SuccessiveHalvingSearch(hpranges = Dict("epochs"=>[100,150,200],"batch_size"=>[8,16,32],"outdims"=>[0.2,0.3,0.5],"innerdims"=>[1.3,2.0,5.0]),multithreads=false)
end
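
For reference, a minimal usage sketch of the new float/integer semantics (the toy matrix, epoch count and variable names here are illustrative only; the fit!/inverse_predict pattern mirrors the test file further down):

using BetaML
X = rand(100, 6)                 # toy data: 100 records, 6 dimensions
# Floats for outdims/innerdims are read as fractions of the data dimensionality:
# outdims=0.333 on 6 columns encodes to round(6*0.333) = 2 dimensions,
# innerdims=2.0 gives inner layers of round(6*2.0) = 12 neurons.
m = AutoEncoder(outdims=0.333, innerdims=2.0, epochs=100)
x_enc = fit!(m, X)               # (100 x 2) encoded representation
x̂ = inverse_predict(m, x_enc)    # reconstruction, back to (100 x 6)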

Base.@kwdef mutable struct AutoEncoderLearnableParameters <: BetaMLLearnableParametersSet
@@ -152,6 +154,7 @@ function fit!(m::AutoEncoder,X)
e_layers = m.hpar.e_layers
d_layers = m.hpar.d_layers
outdims = m.hpar.outdims
innerdims = m.hpar.innerdims
loss = m.hpar.loss
dloss = m.hpar.dloss
epochs = m.hpar.epochs
@@ -172,25 +175,32 @@ function fit!(m::AutoEncoder,X)
outdims_actual = m.par.outdims_actual
fullnn = m.par.fullnn
else
isnothing(outdims) ? outdims_actual = max(1,Int64(round(D/3))) : outdims_actual = copy(outdims)
if D == 1
innerSize = 3
elseif D < 5
innerSize = Int(round(D*D))
elseif D < 10
innerSize = Int(round(D*1.3*D/3))
typeof(outdims) <: Integer ? outdims_actual = outdims : outdims_actual = max(1,Int(round(D * outdims))) # floats are interpreted as a fraction of D
if isnothing(innerdims)
if D == 1
innerSize = 3
elseif D < 5
innerSize = Int(round(D*D))
elseif D < 10
innerSize = Int(round(D*1.3*D/3))
else
innerSize = Int(round(D*1.3*log(2,D)))
end
elseif typeof(innerdims) <: Integer
innerSize = innerdims
else
innerSize = Int(round(D*1.3*log(2,D)))
innerSize = Int(round(D*innerdims))
end
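# Worked examples of the heuristic above (illustrative D values only):
#   D = 1  -> innerSize = 3
#   D = 4  -> innerSize = round(4*4)              = 16
#   D = 8  -> innerSize = round(8*1.3*8/3)        = 28
#   D = 16 -> innerSize = round(16*1.3*log(2,16)) = 83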
if e_layers == nothing

if isnothing(e_layers)
l1 = DenseLayer(D,innerSize, f=relu, df=drelu, rng=rng)
l2 = DenseLayer(innerSize,innerSize, f=relu, df=drelu, rng=rng)
l3 = DenseLayer(innerSize, outdims_actual, f=identity, df=didentity, rng=rng)
e_layers_actual = [l1,l2,l3]
else
e_layers_actual = copy(e_layers)
end
if d_layers == nothing
if isnothing(d_layers)
l1d = DenseLayer(outdims_actual,innerSize, f=relu, df=drelu, rng=rng)
l2d = DenseLayer(innerSize,innerSize, f=relu, df=drelu, rng=rng)
l3d = DenseLayer(innerSize, D, f=identity, df=didentity, rng=rng)
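# (the default decoder mirrors the default encoder: outdims_actual -> innerSize -> innerSize -> D)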
7 changes: 6 additions & 1 deletion test/Utils_tests.jl
@@ -340,11 +340,16 @@ em2 = accuracy(ytest,ŷ2)
@test em2 > 0.85

x = [0.12 0.31 0.29 3.21 0.21;
0.44 1.21 1.18 13.54 0.85;
0.22 0.61 0.58 6.43 0.42;
0.35 0.93 0.91 10.04 0.71;
0.51 1.47 1.46 16.12 0.99;
0.35 0.93 0.91 10.04 0.71;
0.51 1.47 1.46 16.12 0.99;
0.22 0.61 0.58 6.43 0.42;
0.12 0.31 0.29 3.21 0.21;
0.44 1.21 1.18 13.54 0.85];
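# The rows above are roughly proportional to each other, so a single encoded
# dimension should be enough to reconstruct the matrix reasonably well.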
m = AutoEncoder(outdims=1,epochs=400)
m = AutoEncoder(outdims=1,epochs=400,autotune=false) #TODO: check why autotune is broken here
x_reduced = fit!(m,x)
x̂ = inverse_predict(m,x_reduced)
info(m)["rme"]
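# ("rme" presumably stores the relative mean error of the reconstruction)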
