diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 9109bab7..18248789 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -7,6 +7,6 @@ steps:
       queue: "juliagpu"
       cuda: "*"
     command: |
-      julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.instantiate(); using CUDA'
+      julia --color=yes --project -e 'using Pkg; Pkg.add("CUDA"); Pkg.add("NLPModels"); Pkg.add("NLPModelsTest"); Pkg.instantiate()'
       julia --color=yes --project -e 'include("test/gpu.jl")'
     timeout_in_minutes: 30
diff --git a/src/ad_api.jl b/src/ad_api.jl
index 98b02b72..c9867b25 100644
--- a/src/ad_api.jl
+++ b/src/ad_api.jl
@@ -139,10 +139,10 @@ Jtprod!(nlp::AbstractNLPModel, Jtv, c, x, v, ::Val{:F}) = jtprod_residual!(nlp,
 function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:obj}, obj_weight)
   return hprod!(nlp, x, v, Hv, obj_weight = obj_weight)
 end
-function Hvprod!(nlp::AbstractNLPModel, Hv, x, v, ℓ, ::Val{:lag}, y, obj_weight)
+function Hvprod!(nlp::AbstractNLPModel, Hv, x::S, v, ℓ, ::Val{:lag}, y, obj_weight) where {S}
   if nlp.meta.nlin > 0
     # y is of length nnln, and hprod expectes ncon...
-    yfull = zeros(eltype(x), nlp.meta.ncon)
+    yfull = fill!(S(undef, nlp.meta.ncon), 0)
     k = 0
     for i in nlp.meta.nln
       k += 1
@@ -200,14 +200,14 @@ end
 function NLPModels.hess_coord!(
   nlp::AbstractNLPModel,
   ::ADModel,
-  x::AbstractVector,
+  x::S,
   y::AbstractVector,
   obj_weight::Real,
   vals::AbstractVector,
-)
+) where {S}
   if nlp.meta.nlin > 0
     # y is of length nnln, and hess expectes ncon...
-    yfull = zeros(eltype(x), nlp.meta.ncon)
+    yfull = fill!(S(undef, nlp.meta.ncon), 0)
     k = 0
     for i in nlp.meta.nln
       k += 1
diff --git a/src/forward.jl b/src/forward.jl
index f5774494..652760a6 100644
--- a/src/forward.jl
+++ b/src/forward.jl
@@ -195,9 +195,10 @@ function ForwardDiffADHvprod(
   f,
   ncon::Integer = 0,
   c!::Function = (args...) -> [];
-  x0::AbstractVector{T} = rand(nvar),
+  x0::S = rand(nvar),
   kwargs...,
-) where {T}
+) where {S}
+  T = eltype(S)
   function lag(z; nvar = nvar, ncon = ncon, f = f, c! = c!)
     cx, x, y, ob = view(z, 1:ncon),
     view(z, (ncon + 1):(nvar + ncon)),
@@ -221,8 +222,8 @@
     ForwardDiff.gradient!(gz, lag, z, cfg)
     return gz
   end
-  longv = zeros(T, ntotal)
-  Hvp = zeros(T, ntotal)
+  longv = fill!(S(undef, ntotal), 0)
+  Hvp = fill!(S(undef, ntotal), 0)
 
   # unconstrained Hessian
   tagf = ForwardDiff.Tag{typeof(f), T}
diff --git a/src/sparse_diff_tools.jl b/src/sparse_diff_tools.jl
index 0268248e..8ec2917d 100644
--- a/src/sparse_diff_tools.jl
+++ b/src/sparse_diff_tools.jl
@@ -13,16 +13,17 @@
   f,
   ncon,
   c!;
-  x0::AbstractVector{T} = rand(nvar),
+  x0::S = rand(nvar),
   alg::SparseDiffTools.SparseDiffToolsColoringAlgorithm = SparseDiffTools.GreedyD1Color(),
   kwargs...,
-) where {T}
+) where {S}
+  T = eltype(S)
   output = similar(x0, ncon)
   J = compute_jacobian_sparsity(c!, output, x0)
   colors = sparse_matrix_colors(J, alg)
   jac = SparseMatrixCSC{T, Int}(J.m, J.n, J.colptr, J.rowval, T.(J.nzval))
-  dx = zeros(T, ncon)
+  dx = fill!(S(undef, ncon), 0)
   cfJ = SparseDiffTools.ForwardColorJacCache(c!, x0, colorvec = colors, dx = dx, sparsity = jac)
   SDTSparseADJacobian(cfJ)
 end
diff --git a/src/sparse_hessian.jl b/src/sparse_hessian.jl
index 7165d5c0..383042fe 100644
--- a/src/sparse_hessian.jl
+++ b/src/sparse_hessian.jl
@@ -19,12 +19,13 @@ function SparseADHessian(
   f,
   ncon,
   c!;
-  x0::AbstractVector{T} = rand(nvar),
+  x0::S = rand(nvar),
   alg = ColPackColoration(),
   kwargs...,
-) where {T}
-  S = compute_hessian_sparsity(f, nvar, c!, ncon)
-  H = ncon == 0 ? S : S[1:nvar, 1:nvar]
+) where {S}
+  T = eltype(S)
+  Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
+  H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]
   colors = sparse_matrix_colors(H, alg)
   ncolors = maximum(colors)
@@ -59,10 +60,9 @@ function SparseADHessian(
     ForwardDiff.gradient!(gz, lag, z, cfg)
     return gz
   end
-  longv = zeros(T, ntotal)
-  Hvp = zeros(T, ntotal)
-
-  y = zeros(T, ncon)
+  longv = fill!(S(undef, ntotal), 0)
+  Hvp = fill!(S(undef, ntotal), 0)
+  y = fill!(S(undef, ncon), 0)
 
   return SparseADHessian(d, rowval, colptr, colors, ncolors, res, lz, glz, sol, longv, Hvp, ∇φ!, y)
 end
@@ -95,8 +95,8 @@ function SparseReverseADHessian(
   alg = ColPackColoration(),
   kwargs...,
 ) where {T}
-  S = compute_hessian_sparsity(f, nvar, c!, ncon)
-  H = ncon == 0 ? S : S[1:nvar, 1:nvar]
+  Hs = compute_hessian_sparsity(f, nvar, c!, ncon)
+  H = ncon == 0 ? Hs : Hs[1:nvar, 1:nvar]
   colors = sparse_matrix_colors(H, alg)
   ncolors = maximum(colors)
@@ -138,7 +138,7 @@ function SparseReverseADHessian(
   end
   Hv_temp = similar(x0)
 
-  y = zeros(T, ncon)
+  y = similar(x0, ncon)
   return SparseReverseADHessian(
     d,
     rowval,
diff --git a/src/sparse_sym.jl b/src/sparse_sym.jl
index 6af8936e..771c15f0 100644
--- a/src/sparse_sym.jl
+++ b/src/sparse_sym.jl
@@ -102,9 +102,9 @@ function SparseSymbolicsADHessian(
   f,
   ncon,
   c!;
-  x0::AbstractVector{T} = rand(nvar),
+  x0::S = rand(nvar),
   kwargs...,
-) where {T}
+) where {S}
   Symbolics.@variables xs[1:nvar], μs
   xsi = Symbolics.scalarize(xs)
   fun = μs * f(xsi)
@@ -122,7 +122,7 @@ function SparseSymbolicsADHessian(
   # cfH is a Tuple{Expr, Expr}, cfH[2] is the in-place function
   # that we need to update a vector `vals` with the nonzeros of ∇²ℓ(x, y, μ).
   cfH = Symbolics.build_function(vals, xsi, ysi, μs, expression = Val{false})
-  y = zeros(T, ncon)
+  y = fill!(S(undef, ncon), 0)
   return SparseSymbolicsADHessian(nnzh, rows, cols, y, cfH[2])
 end
diff --git a/test/Project.toml b/test/Project.toml
index ad79ac4c..d254894f 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,4 +1,5 @@
 [deps]
+CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -14,6 +15,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [compat]
+CUDA = "4, 5"
 Enzyme = "0.10, 0.11, 0.12"
 ForwardDiff = "0.10"
 ManualNLPModels = "0.1"
diff --git a/test/gpu.jl b/test/gpu.jl
index 892808bf..65fd6f7f 100644
--- a/test/gpu.jl
+++ b/test/gpu.jl
@@ -1,3 +1,31 @@
-using CUDA, Test
+using CUDA, LinearAlgebra, SparseArrays, Test
+using ADNLPModels, NLPModels, NLPModelsTest
+
+for problem in NLPModelsTest.nlp_problems ∪ ["GENROSE"]
+  include("nlp/problems/$(lowercase(problem)).jl")
+end
+for problem in NLPModelsTest.nls_problems
+  include("nls/problems/$(lowercase(problem)).jl")
+end
 
 @test CUDA.functional()
+
+@testset "Checking NLPModelsTest (NLP) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
+  @testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nlp_problems
+    nlp_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
+    CUDA.allowscalar() do
+      # sparse Jacobian/Hessian doesn't work here
+      multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
+    end
+  end
+end
+
+@testset "Checking NLPModelsTest (NLS) tests with $backend - GPU multiple precision" for backend in keys(ADNLPModels.predefined_backend)
+  @testset "Checking GPU multiple precision on problem $problem" for problem in NLPModelsTest.nls_problems
+    nls_from_T = eval(Meta.parse(lowercase(problem) * "_autodiff"))
+    CUDA.allowscalar() do
+      # sparse Jacobian/Hessian doesn't work here
+      multiple_precision_nls_array(T -> nls_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian, jacobian_residual_backend = ADNLPModels.ForwardDiffADJacobian, hessian_residual_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jprod, jprod_residual, hprod_residual], linear_api = true)
+    end
+  end
+end
diff --git a/test/nlp/nlpmodelstest.jl b/test/nlp/nlpmodelstest.jl
index 78bf56ec..dbc94ec5 100644
--- a/test/nlp/nlpmodelstest.jl
+++ b/test/nlp/nlpmodelstest.jl
@@ -18,6 +18,14 @@
     @testset "Check multiple precision" begin
       multiple_precision_nlp(nlp_from_T, exclude = [], linear_api = true)
     end
+    @testset "Check multiple precision GPU" begin
+      if CUDA.functional()
+        CUDA.allowscalar() do
+          # sparse Jacobian/Hessian doesn't work here
+          multiple_precision_nlp_array(T -> nlp_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jth_hprod, hprod, jprod], linear_api = true)
+        end
+      end
+    end
     @testset "Check view subarray" begin
       view_subarray_nlp(nlp_ad, exclude = [])
     end
diff --git a/test/nlp/problems/brownden.jl b/test/nlp/problems/brownden.jl
index e323731c..565074d9 100644
--- a/test/nlp/problems/brownden.jl
+++ b/test/nlp/problems/brownden.jl
@@ -1,7 +1,9 @@
 export brownden_autodiff
 
-function brownden_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[25.0; 5.0; -5.0; -1.0]
+brownden_autodiff(::Type{T}; kwargs...) where {T <: Number} = brownden_autodiff(Vector{T}; kwargs...)
+function brownden_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  T = eltype(S)
+  x0 = S([25.0; 5.0; -5.0; -1.0])
   f(x) = begin
     s = zero(T)
     for i = 1:20
diff --git a/test/nlp/problems/hs10.jl b/test/nlp/problems/hs10.jl
index b641f1b9..9e7d57b2 100644
--- a/test/nlp/problems/hs10.jl
+++ b/test/nlp/problems/hs10.jl
@@ -1,11 +1,12 @@
 export hs10_autodiff
 
-function hs10_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-10.0; 10.0]
+hs10_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs10_autodiff(Vector{T}; kwargs...)
+function hs10_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-10; 10])
   f(x) = x[1] - x[2]
   c(x) = [-3 * x[1]^2 + 2 * x[1] * x[2] - x[2]^2 + 1]
-  lcon = T[0.0]
-  ucon = T[Inf]
+  lcon = S([0])
+  ucon = S([Inf])
   return ADNLPModel(f, x0, c, lcon, ucon, name = "hs10_autodiff"; kwargs...)
 end
diff --git a/test/nlp/problems/hs11.jl b/test/nlp/problems/hs11.jl
index ff9b14a9..3eae443b 100644
--- a/test/nlp/problems/hs11.jl
+++ b/test/nlp/problems/hs11.jl
@@ -1,11 +1,12 @@
 export hs11_autodiff
 
-function hs11_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[4.9; 0.1]
+hs11_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs11_autodiff(Vector{T}; kwargs...)
+function hs11_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([49 // 10; 1 // 10])
   f(x) = (x[1] - 5)^2 + x[2]^2 - 25
   c(x) = [-x[1]^2 + x[2]]
-  lcon = T[-Inf]
-  ucon = T[0.0]
+  lcon = S([-Inf])
+  ucon = S([0])
   return ADNLPModel(f, x0, c, lcon, ucon, name = "hs11_autodiff"; kwargs...)
 end
diff --git a/test/nlp/problems/hs13.jl b/test/nlp/problems/hs13.jl
index 001be342..cdf0eb4e 100644
--- a/test/nlp/problems/hs13.jl
+++ b/test/nlp/problems/hs13.jl
@@ -1,16 +1,17 @@
 export hs13_autodiff
 
-function hs13_autodiff(::Type{T} = Float64; kwargs...) where {T}
+hs13_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs13_autodiff(Vector{T}; kwargs...)
+function hs13_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
   function f(x)
     return (x[1] - 2)^2 + x[2]^2
   end
-  x0 = -2 * ones(T, 2)
-  lvar = zeros(T, 2)
-  uvar = T(Inf) * ones(T, 2)
+  x0 = fill!(S(undef, 2), -2)
+  lvar = fill!(S(undef, 2), 0)
+  uvar = fill!(S(undef, 2), Inf)
   function c(x)
     return [(1 - x[1])^3 - x[2]]
   end
-  lcon = zeros(T, 1)
-  ucon = T(Inf) * ones(T, 1)
+  lcon = fill!(S(undef, 1), 0)
+  ucon = fill!(S(undef, 1), Inf)
   return ADNLPModels.ADNLPModel(f, x0, lvar, uvar, c, lcon, ucon, name = "hs13_autodiff"; kwargs...)
 end
diff --git a/test/nlp/problems/hs14.jl b/test/nlp/problems/hs14.jl
index 2f43a711..c94b151e 100644
--- a/test/nlp/problems/hs14.jl
+++ b/test/nlp/problems/hs14.jl
@@ -1,15 +1,16 @@
 export hs14_autodiff
 
-function hs14_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[2.0; 2.0]
+hs14_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs14_autodiff(Vector{T}; kwargs...)
+function hs14_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([2; 2])
   f(x) = (x[1] - 2)^2 + (x[2] - 1)^2
   c(x) = [-x[1]^2 / 4 - x[2]^2 + 1]
-  lcon = T[-1; 0.0]
-  ucon = T[-1; Inf]
+  lcon = S([-1; 0])
+  ucon = S([-1; Inf])
   clinrows = [1, 1]
   clincols = [1, 2]
-  clinvals = T[1, -2]
+  clinvals = S([1, -2])
 
   return ADNLPModel(
     f,
diff --git a/test/nlp/problems/hs5.jl b/test/nlp/problems/hs5.jl
index 95d6f3b0..a09fc78f 100644
--- a/test/nlp/problems/hs5.jl
+++ b/test/nlp/problems/hs5.jl
@@ -1,10 +1,11 @@
 export hs5_autodiff
 
-function hs5_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = zeros(T, 2)
+hs5_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs5_autodiff(Vector{T}; kwargs...)
+function hs5_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = fill!(S(undef, 2), 0)
   f(x) = sin(x[1] + x[2]) + (x[1] - x[2])^2 - 3x[1] / 2 + 5x[2] / 2 + 1
-  l = T[-1.5; -3.0]
-  u = T[4.0; 3.0]
+  l = S([-1.5; -3.0])
+  u = S([4.0; 3.0])
 
   return ADNLPModel(f, x0, l, u, name = "hs5_autodiff"; kwargs...)
 end
diff --git a/test/nlp/problems/hs6.jl b/test/nlp/problems/hs6.jl
index 5273c30a..91c3104e 100644
--- a/test/nlp/problems/hs6.jl
+++ b/test/nlp/problems/hs6.jl
@@ -1,11 +1,12 @@
 export hs6_autodiff
 
-function hs6_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-1.2; 1.0]
+hs6_autodiff(::Type{T}; kwargs...) where {T <: Number} = hs6_autodiff(Vector{T}; kwargs...)
+function hs6_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-12 // 10; 1])
   f(x) = (1 - x[1])^2
   c(x) = [10 * (x[2] - x[1]^2)]
-  lcon = T[0.0]
-  ucon = T[0.0]
+  lcon = fill!(S(undef, 1), 0)
+  ucon = fill!(S(undef, 1), 0)
   return ADNLPModel(f, x0, c, lcon, ucon, name = "hs6_autodiff"; kwargs...)
 end
diff --git a/test/nlp/problems/lincon.jl b/test/nlp/problems/lincon.jl
index ff15827f..d735f678 100644
--- a/test/nlp/problems/lincon.jl
+++ b/test/nlp/problems/lincon.jl
@@ -1,6 +1,8 @@
 export lincon_autodiff
 
-function lincon_autodiff(::Type{T} = Float64; kwargs...) where {T}
+lincon_autodiff(::Type{T}; kwargs...) where {T <: Number} = lincon_autodiff(Vector{T}; kwargs...)
+function lincon_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  T = eltype(S)
   A = T[1 2; 3 4]
   b = T[5; 6]
   B = diagm(T[3 * i for i = 3:5])
@@ -8,15 +10,15 @@
   C = T[0 -2; 4 0]
   d = T[1; -1]
 
-  x0 = zeros(T, 15)
+  x0 = fill!(S(undef, 15), 0)
   f(x) = sum(i + x[i]^4 for i = 1:15)
 
-  lcon = T[22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)]
-  ucon = T[22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c]
+  lcon = S([22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)])
+  ucon = S([22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c])
   clinrows = [1, 2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11]
   clincols = [15, 10, 11, 12, 13, 14, 8, 9, 7, 6, 1, 1, 2, 2, 3, 4, 5]
-  clinvals = vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B))
+  clinvals = S(vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B)))
 
   return ADNLPModel(
     f,
diff --git a/test/nlp/problems/linsv.jl b/test/nlp/problems/linsv.jl
index f3001502..36745848 100644
--- a/test/nlp/problems/linsv.jl
+++ b/test/nlp/problems/linsv.jl
@@ -1,14 +1,15 @@
 export linsv_autodiff
 
-function linsv_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = zeros(T, 2)
+linsv_autodiff(::Type{T}; kwargs...) where {T <: Number} = linsv_autodiff(Vector{T}; kwargs...)
+function linsv_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = fill!(S(undef, 2), 0)
   f(x) = x[1]
-  lcon = T[3.0; 1.0]
-  ucon = T[Inf; Inf]
+  lcon = S([3; 1])
+  ucon = S([Inf; Inf])
   clinrows = [1, 1, 2]
   clincols = [1, 2, 2]
-  clinvals = T[1, 1, 1]
+  clinvals = S([1, 1, 1])
 
   return ADNLPModel(
     f,
diff --git a/test/nlp/problems/mgh01feas.jl b/test/nlp/problems/mgh01feas.jl
index 108ec0c8..cd9e7341 100644
--- a/test/nlp/problems/mgh01feas.jl
+++ b/test/nlp/problems/mgh01feas.jl
@@ -1,15 +1,16 @@
 export mgh01feas_autodiff
 
-function mgh01feas_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-1.2; 1.0]
+mgh01feas_autodiff(::Type{T}; kwargs...) where {T <: Number} = mgh01feas_autodiff(Vector{T}; kwargs...)
+function mgh01feas_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-12 // 10; 1])
   f(x) = zero(eltype(x))
   c(x) = [10 * (x[2] - x[1]^2)]
-  lcon = T[1, 0]
-  ucon = T[1, 0]
+  lcon = S([1, 0])
+  ucon = S([1, 0])
   clinrows = [1]
   clincols = [1]
-  clinvals = T[1]
+  clinvals = S([1])
 
   return ADNLPModel(
     f,
diff --git a/test/nls/nlpmodelstest.jl b/test/nls/nlpmodelstest.jl
index e1681f49..7394a64b 100644
--- a/test/nls/nlpmodelstest.jl
+++ b/test/nls/nlpmodelstest.jl
@@ -35,6 +35,14 @@
     @testset "Check multiple precision" begin
       multiple_precision_nls(nls_from_T, exclude = exclude, linear_api = true)
     end
+    @testset "Check multiple precision GPU" begin
+      if CUDA.functional()
+        CUDA.allowscalar() do
+          # sparse Jacobian/Hessian doesn't work here
+          multiple_precision_nls_array(T -> nls_from_T(T; jacobian_backend = ADNLPModels.ForwardDiffADJacobian, hessian_backend = ADNLPModels.ForwardDiffADHessian, jacobian_residual_backend = ADNLPModels.ForwardDiffADJacobian, hessian_residual_backend = ADNLPModels.ForwardDiffADHessian), CuArray, exclude = [jprod, jprod_residual, hprod_residual], linear_api = true)
+        end
+      end
+    end
     @testset "Check view subarray" begin
       view_subarray_nls.(nlss, exclude = exclude)
     end
diff --git a/test/nls/problems/bndrosenbrock.jl b/test/nls/problems/bndrosenbrock.jl
index 5040dc15..68c65368 100644
--- a/test/nls/problems/bndrosenbrock.jl
+++ b/test/nls/problems/bndrosenbrock.jl
@@ -1,11 +1,12 @@
 export bndrosenbrock_autodiff
 
-function bndrosenbrock_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-1.2; 1]
+bndrosenbrock_autodiff(::Type{T}; kwargs...) where {T <: Number} = bndrosenbrock_autodiff(Vector{T}; kwargs...)
+function bndrosenbrock_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-12 // 10; 1])
   F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)]
-  lvar = T[-1; -2]
-  uvar = T[0.8; 2]
+  lvar = S([-1; -2])
+  uvar = S([8 // 10; 2])
   return ADNLSModel(F, x0, 2, lvar, uvar, name = "bndrosenbrock_autodiff"; kwargs...)
 end
diff --git a/test/nls/problems/lls.jl b/test/nls/problems/lls.jl
index d0710818..ca844a26 100644
--- a/test/nls/problems/lls.jl
+++ b/test/nls/problems/lls.jl
@@ -1,14 +1,15 @@
 export lls_autodiff
 
-function lls_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = zeros(T, 2)
+lls_autodiff(::Type{T}; kwargs...) where {T <: Number} = lls_autodiff(Vector{T}; kwargs...)
+function lls_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = fill!(S(undef, 2), 0)
   F(x) = [x[1] - x[2]; x[1] + x[2] - 2; x[2] - 2]
-  lcon = T[0.0]
-  ucon = T[Inf]
+  lcon = S([0])
+  ucon = S([Inf])
   clinrows = [1, 1]
   clincols = [1, 2]
-  clinvals = T[1, 1]
+  clinvals = S([1, 1])
 
   return ADNLSModel(
     F,
diff --git a/test/nls/problems/mgh01.jl b/test/nls/problems/mgh01.jl
index 6b0bb5f8..869ea8ab 100644
--- a/test/nls/problems/mgh01.jl
+++ b/test/nls/problems/mgh01.jl
@@ -1,7 +1,8 @@
 export mgh01_autodiff # , MGH01_special
 
-function mgh01_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-1.2; 1.0]
+mgh01_autodiff(::Type{T}; kwargs...) where {T <: Number} = mgh01_autodiff(Vector{T}; kwargs...)
+function mgh01_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-12 // 10; 1])
   F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)]
 
   return ADNLSModel(F, x0, 2, name = "mgh01_autodiff"; kwargs...)
diff --git a/test/nls/problems/nlshs20.jl b/test/nls/problems/nlshs20.jl
index 698a9bd2..c03fd794 100644
--- a/test/nls/problems/nlshs20.jl
+++ b/test/nls/problems/nlshs20.jl
@@ -1,13 +1,14 @@
 export nlshs20_autodiff
 
-function nlshs20_autodiff(::Type{T} = Float64; kwargs...) where {T}
-  x0 = T[-2.0; 1.0]
+nlshs20_autodiff(::Type{T}; kwargs...) where {T <: Number} = nlshs20_autodiff(Vector{T}; kwargs...)
+function nlshs20_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  x0 = S([-2; 1])
   F(x) = [1 - x[1]; 10 * (x[2] - x[1]^2)]
-  lvar = T[-0.5; -Inf]
-  uvar = T[0.5; Inf]
+  lvar = S([-1 // 2; -Inf])
+  uvar = S([1 // 2; Inf])
   c(x) = [x[1] + x[2]^2; x[1]^2 + x[2]; x[1]^2 + x[2]^2 - 1]
-  lcon = zeros(T, 3)
-  ucon = fill(T(Inf), 3)
+  lcon = fill!(S(undef, 3), 0)
+  ucon = fill!(S(undef, 3), Inf)
   return ADNLSModel(F, x0, 2, lvar, uvar, c, lcon, ucon, name = "nlshs20_autodiff"; kwargs...)
 end
diff --git a/test/nls/problems/nlslc.jl b/test/nls/problems/nlslc.jl
index 584caa3c..9127b661 100644
--- a/test/nls/problems/nlslc.jl
+++ b/test/nls/problems/nlslc.jl
@@ -1,6 +1,8 @@
 export nlslc_autodiff
 
-function nlslc_autodiff(::Type{T} = Float64; kwargs...) where {T}
+nlslc_autodiff(::Type{T}; kwargs...) where {T <: Number} = nlslc_autodiff(Vector{T}; kwargs...)
+function nlslc_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
+  T = eltype(S)
   A = T[1 2; 3 4]
   b = T[5; 6]
   B = diagm(T[3 * i for i = 3:5])
@@ -8,15 +10,15 @@ function nlslc_autodiff(::Type{T} = Float64; kwargs...) where {T}
   C = T[0 -2; 4 0]
   d = T[1; -1]
 
-  x0 = zeros(T, 15)
+  x0 = fill!(S(undef, 15), 0)
   F(x) = [x[i]^2 - i^2 for i = 1:15]
 
-  lcon = T[22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)]
-  ucon = T[22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c]
+  lcon = S([22.0; 1.0; -Inf; -11.0; -d; -b; -Inf * ones(3)])
+  ucon = S([22.0; Inf; 16.0; 9.0; -d; Inf * ones(2); c])
   clinrows = [1, 2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11]
   clincols = [15, 10, 11, 12, 13, 14, 8, 9, 7, 6, 1, 1, 2, 2, 3, 4, 5]
-  clinvals = vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B))
+  clinvals = S(vcat(T(15), c, d, b, C[1, 2], C[2, 1], A[:], diag(B)))
 
   return ADNLSModel(
     F,
diff --git a/test/runtests.jl b/test/runtests.jl
index 6026b178..3eea51f6 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,4 +1,4 @@
-using LinearAlgebra, SparseArrays, Test
+using CUDA, LinearAlgebra, SparseArrays, Test
 using ADNLPModels, ManualNLPModels, NLPModels, NLPModelsModifiers, NLPModelsTest
 using ADNLPModels:
   gradient, gradient!, jacobian, hessian, Jprod!, Jtprod!, directional_second_derivative, Hvprod!
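
Reviewer note: the pattern applied throughout this diff is to parameterize constructors by the storage type S (for example Vector{Float64} or CuArray{Float32}) rather than by the element type T alone, recover T = eltype(S) where scalars are needed, and allocate every internal buffer with fill!(S(undef, n), v) or similar(x0, n) so that nothing silently falls back to a CPU Vector{T}. Below is a minimal sketch of that pattern on a hypothetical problem constructor, demo_autodiff, which is not part of this PR:

# Minimal sketch of the storage-type pattern used across this diff.
# `demo_autodiff` is a hypothetical example, not part of the package.
using ADNLPModels

# Scalar-type method kept for backward compatibility: forwards to CPU storage.
demo_autodiff(::Type{T}; kwargs...) where {T <: Number} = demo_autodiff(Vector{T}; kwargs...)

# Storage-type method: S fixes both the element type and where the data lives.
function demo_autodiff(::Type{S} = Vector{Float64}; kwargs...) where {S}
  T = eltype(S)               # element type recovered from the storage type
  x0 = fill!(S(undef, 2), 0)  # allocated as an S, never a hard-coded Vector{T}
  f(x) = (one(T) - x[1])^2 + 100 * (x[2] - x[1]^2)^2
  return ADNLPModel(f, x0, name = "demo_autodiff"; kwargs...)
end

With this layout, demo_autodiff(Float32) behaves like the old scalar-type constructors, while demo_autodiff(CuArray{Float64}) (with CUDA.jl loaded) builds the model directly on the GPU; the latter is what the new multiple_precision_nlp_array(..., CuArray) tests exercise with the dense ForwardDiff backends.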